##// END OF EJS Templates
run-tests: allow run-tests.py to run tests outside current directory...
David R. MacIver -
r28180:2836a43c default
parent child Browse files
Show More
@@ -1,2453 +1,2453 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import print_function
47 47
48 48 from distutils import version
49 49 import difflib
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import shutil
55 55 import subprocess
56 56 import signal
57 57 import socket
58 58 import sys
59 59 import tempfile
60 60 import time
61 61 import random
62 62 import re
63 63 import threading
64 64 import killdaemons as killmod
65 65 try:
66 66 import Queue as queue
67 67 except ImportError:
68 68 import queue
69 69 from xml.dom import minidom
70 70 import unittest
71 71
# Bytes-keyed environment mapping: os.environb on Python 3, plain
# os.environ on Python 2 (where it is already bytes).
osenvironb = getattr(os, 'environb', os.environ)
# Held around subprocess.Popen calls in Popen4() -- presumably to avoid
# fd-inheritance races between worker threads; confirm before removing.
processlock = threading.Lock()
74 74
# Interpreter-version gate: define PYTHON3 plus the _bytespath/_strpath
# converters used to keep path handling bytes-based on both major versions.
if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Paths are manipulated as bytes throughout this script; encode
        # native (unicode) strings as UTF-8.
        return p.encode('utf-8')

    def _strpath(p):
        # Inverse of _bytespath: bytes path -> native string.
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath
99 99
# For Windows support
# os.WIFEXITED is POSIX-only; fall back to "never exited normally" so
# callers (see _runcommand) keep the raw status untouched on Windows.
wifexited = getattr(os, "WIFEXITED", lambda x: False)
102 102
def checkportisavailable(port):
    """Return True if `port` seems free to bind on localhost.

    A successful bind is released immediately.  This check is inherently
    racy: another process may grab the port between this probe and any
    later bind by the caller.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        # EADDRINUSE just means "taken"; anything else is a real error.
        if not exc.errno == errno.EADDRINUSE:
            raise
        return False
    finally:
        # The original leaked the socket whenever bind() raised; always
        # close it, on success and failure alike.
        s.close()
114 114
# Only ask subprocess to close inherited fds on POSIX -- presumably
# because close_fds could not be combined with redirected stdio on
# Windows at the time; confirm before changing.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through a shell in directory `wd`.

    Returns the subprocess.Popen object decorated with popen2.Popen4
    style aliases (fromchild/tochild/childerr).  stdout and stderr are
    merged into a single pipe.

    If `timeout` is non-zero, a watchdog thread terminates the process
    once it has run longer than `timeout` seconds and sets
    `p.timeout = True` so callers can distinguish a kill from a normal
    exit.
    """
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            # Only flag a timeout when the process is actually still
            # running.  The original set p.timeout = True unconditionally
            # here, so a fast, successful process could still be reported
            # as 'timeout' if the caller inspected p.timeout after wait()
            # once this thread had woken up.
            if p.returncode is None:
                p.timeout = True
                terminate(p)
        threading.Thread(target=t).start()

    return p
140 140
# Path (as bytes, forward slashes) of the interpreter running this script,
# used to invoke Python-based tests with the same interpreter.
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable that extends the module search path of the hg under
# test; Jython reads JYTHONPATH instead of PYTHONPATH.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment override, fallback value).
# getparser() resolves the overrides and replaces each tuple with the
# final value in place.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
153 153
def parselistfiles(files, listtype, warn=True):
    """Parse "list files" (e.g. blacklists/whitelists) into a dict.

    Each non-empty line of a list file names one entry; a ``#`` starts a
    comment and surrounding whitespace is stripped.  Returns a dict
    mapping each entry (bytes) to the filename it came from.  Files that
    do not exist are skipped, with a warning on stdout unless `warn` is
    False; other I/O errors propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Close the file even if reading raises; the original leaked the
        # handle on error.  Iterating the file directly also avoids
        # materializing all lines up front.
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
    return entries
174 174
def getparser():
    """Obtain the OptionParser used by the CLI.

    Also resolves the $HGTEST_* environment overrides into the
    module-level `defaults` dict (mutated in place; parseargs() relies
    on the resolved values) and installs them as parser defaults.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("--chg", action="store_true",
        help="install and use chg wrapper in place of hg")
    parser.add_option("--with-chg", metavar="CHG",
        help="use specified chg wrapper in place of hg")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')

    # Replace each (envvar, default) tuple with the effective value,
    # coerced to the default's type, and register it with the parser.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
274 274
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns (options, args).  Resolves --local/--with-hg/--with-chg to
    absolute byte paths, rejects incompatible option combinations via
    parser.error() (which exits), converts the blacklist/whitelist file
    options into parsed entry dicts, and sets the module-level `verbose`
    flag when --verbose is given.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.realpath(
            os.path.expanduser(_bytespath(options.with_hg)))
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == b'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local is shorthand for running the hg script that lives next
        # to this test directory's parent.
        testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
        hgbin = os.path.join(os.path.dirname(testdir), b'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False # no installation to temporary location
        options.with_chg = os.path.realpath(
            os.path.expanduser(_bytespath(options.with_chg)))
        if not (os.path.isfile(options.with_chg) and
                os.access(options.with_chg, os.X_OK)):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error('--chg does not work when --with-hg is specified '
                     '(use --with-chg instead)')

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Note: the empty string, not True -- vlog()/log() treat it as
        # "verbosity configured" (vlog checks `verbose is False`).
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug streams output unfiltered, so timeouts cannot apply.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.6 and 2.7')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return (options, args)
367 367
def rename(src, dst):
    """Move `src` to `dst`, clobbering any existing destination.

    Unlike os.rename() this is neither atomic nor friendly to files that
    are still open, but it succeeds when `dst` already exists.
    """
    # Implemented as copy-then-delete rather than a true rename.
    shutil.copy(src, dst)
    os.remove(src)
374 374
# difflib.unified_diff compares str sequences; on Python 3 our test
# output is bytes, so route through difflib.diff_bytes instead.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
379 379
def getdiff(expected, output, ref, err):
    """Diff expected vs. actual test output.

    Returns (servefail, lines): `lines` is the normalized unified diff
    (bytes), and `servefail` tells whether the diff contains a failed
    server start ("abort: child process failed to start").
    """
    servefail = False
    lines = []
    for rawline in _unified_diff(expected, output, ref, err):
        line = rawline
        # Normalize path separators in the '---'/'+++' header lines.
        if line[:3] in (b'+++', b'---'):
            line = line.replace(b'\\', b'/')
        # Drop a single trailing space before the newline.
        if line.endswith(b' \n'):
            line = line[:-2] + b'\n'
        lines.append(line)
        servefail = servefail or line.startswith(
            b'+ abort: child process failed to start')

    return servefail, lines
394 394
# Module-level verbosity flag.  parseargs() rebinds it to '' (the empty
# string) when --verbose is given; vlog() deliberately tests `is False`
# rather than truthiness so the empty string still enables logging.
verbose = False
def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)
402 402
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    # ']]>' would terminate the CDATA section prematurely; split it up.
    return sanitized.replace(b']]>', b'] ]>')
416 416
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.

    Output is serialized under `iolock` (defined elsewhere in this file)
    so messages from concurrent test threads do not interleave.  When
    `verbose` is a non-empty string it is printed as a prefix.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()
429 429
def terminate(proc):
    """Terminate subprocess (with fallback for Python versions < 2.6)"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        # Older Popen objects lack .terminate(); send SIGTERM by hand.
        killer = getattr(proc, 'terminate', None)
        if killer is None:
            os.kill(proc.pid, signal.SIGTERM)
        else:
            killer()
    except OSError:
        # Process may already be gone; nothing more to do.
        pass
437 437
def killdaemons(pidfile):
    """Kill daemon processes listed in `pidfile` via the killdaemons
    module, removing the pid file afterwards and logging through vlog."""
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
441 441
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None, hgcommand=None,
                 slowtimeout=defaults['slowtimeout']):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.

        hgcommand is the name of the hg command the tests should invoke
        (defaults to b'hg').
        """
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)
        self._hgcommand = hgcommand or b'hg'

        # Per-run state, (re)initialized in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The per-thread directory may already exist (shared between the
        # tests of one worker); only a pre-existing dir is tolerated.
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest as e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest as e:
                result.addWarn(self, str(e))
            except ReportedTest as e:
                # Failure already reported through the result object.
                pass
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Negative return codes indicate death by signal.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemon processes the test started.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp, self._threadtmp))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        # Persist failing output to the .err file for later inspection.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _portmap(self, i):
        """Return the (regex, replacement) pair normalizing the i-th
        reserved port number to the $HGPORT<i> placeholder."""
        offset = '' if i == 0 else '%s' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        return r

    def _escapepath(self, p):
        """Turn path `p` into a regex fragment matching it in test output.

        On Windows the pattern matches letters case-insensitively and
        accepts either path separator; elsewhere a plain re.escape().
        """
        if os.name == 'nt':
            return (
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in p))
            )
        else:
            return re.escape(p)

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        def defineport(i):
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        # Drop variables that could change hg's behavior or output.
        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        hgrc = open(path, 'wb')
        hgrc.write(b'[ui]\n')
        hgrc.write(b'slash = True\n')
        hgrc.write(b'interactive = False\n')
        hgrc.write(b'mergemarkers = detailed\n')
        hgrc.write(b'promptecho = True\n')
        hgrc.write(b'[defaults]\n')
        hgrc.write(b'backout = -d "0 0"\n')
        hgrc.write(b'commit = -d "0 0"\n')
        hgrc.write(b'shelve = --date "0 0"\n')
        hgrc.write(b'tag = -d "0 0"\n')
        hgrc.write(b'[devel]\n')
        hgrc.write(b'all-warnings = true\n')
        hgrc.write(b'[largefiles]\n')
        hgrc.write(b'usercache = %s\n' %
                   (os.path.join(self._testtmp, b'.cache/largefiles')))

        # Append user-supplied "section.key=value" options.
        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write(b'[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Stream straight to the console; nothing captured.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, $TESTTMP paths, etc. in the captured output.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
876 876
class PythonTest(Test):
    """A test implemented as a Python script."""

    @property
    def refpath(self):
        # The expected output lives next to the test as <name>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        warnflag = b' -3' if self._py3kwarnings else b''
        cmd = b'%s%s "%s"' % (PYTHON, warnflag, self.path)
        vlog("# Running", cmd)
        # On Windows the child emits \r\n; normalize so comparison against
        # the .out reference is platform independent.
        result = self._runcommand(cmd, env,
                                  normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()
        return result
895 895
# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
# (Consulted by TTest.globmatch before suggesting a '-glob' warning.)
checkcodeglobpats = [
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
]
904 904
# bchr(i) -> the single byte for integer i.  chr() already yields a byte
# string on Python 2; on Python 3 chr() would yield unicode, so build a
# bytes object explicitly there.
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])
908 908
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = 'skipped: '
    FAILED_PREFIX = 'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    @property
    def refpath(self):
        # The .t file is both the test and its own expected output.
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        """Translate the .t file to a shell script, run it and merge the
        actual output back into the expected output for comparison."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        """Check feature requirements via the hghave script.

        Return (True, None) when all of reqs are available, otherwise
        (False, <hghave output>).  Exits the runner on hghave usage
        errors (exit status 2).
        """
        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(b'%s -c "%s/hghave %s"' %
                      (self._shell, tdir, b' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout)
            sys.exit(1)

        if ret != 0:
            return False, stdout

        # reqs holds bytes tokens (split from a bytes line), so the
        # membership test must use a bytes literal or it never matches
        # on Python 3 (identical behavior on Python 2).
        if b'slow' in reqs:
            self._timeout = self._slowtimeout
        return True, None

    def _parsetest(self, lines):
        """Compile the .t source into a shell script.

        Returns (salt, script, after, expected) where script is the list
        of shell lines to execute, after maps source line numbers to the
        non-active text to re-merge, and expected maps them to expected
        output lines.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    # bytes literal: these markers are merged with the
                    # bytes test lines later; a str here breaks Python 3.
                    after.setdefault(pos, []).append(
                        b'  !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(b'  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(b'  !!! nested #if\n')
                skipping = not self._hghave(lsplit[1:])[0]
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Abort the script if a cd fails; everything after it
                    # would otherwise run in the wrong directory.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(b'  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Returns (exitcode, postout) where postout is the .t-style merged
        text; exitcode becomes False when only warn-level mismatches
        (glob hints) were seen.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                el = None
                if expected.get(pos, None):
                    el = expected[pos].pop(0)

                r = TTest.linematch(el, lout)
                if isinstance(r, str):
                    if r == '+glob':
                        lout = el[:-1] + ' (glob)\n'
                        r = '' # Warn only this line.
                    elif r == '-glob':
                        lout = ''.join(el.rsplit(' (glob)', 1))
                        r = '' # Warn only this line.
                    elif r == "retry":
                        # Optional '(?)' line that did not match: emit it
                        # and retry the same output line against the next
                        # expected line.
                        postout.append(b'  ' + el)
                        continue
                    else:
                        log('\ninfo, unknown linematch result: %r\n' % r)
                        r = False
                if r:
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break

            # clean up any optional leftovers
            while expected.get(pos, None):
                el = expected[pos].pop(0)
                if not el.endswith(b" (?)\n"):
                    expected[pos].insert(0, el)
                    break
                postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout

    @staticmethod
    def rematch(el, l):
        """Match output line l against the '(re)' pattern el."""
        try:
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        """Match output line l against the '(glob)' pattern el."""
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

    @staticmethod
    def linematch(el, l):
        """Match output line l against expected line el.

        Returns True on a match, False/None on failure, b'+glob'/b'-glob'
        to suggest adding/removing a glob marker, or "retry" for a
        non-matching optional '(?)' line.
        """
        retry = False
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(b" (?)\n"):
                retry = "retry"
                # el is bytes; concatenating a str "\n" here raises
                # TypeError on Python 3, so use a bytes literal.
                el = el[:-5] + b"\n"
            if el.endswith(b" (esc)\n"):
                if PYTHON3:
                    el = el[:-7].decode('unicode_escape') + '\n'
                    el = el.encode('utf-8')
                else:
                    el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
                return True
            if el.endswith(b" (re)\n"):
                return TTest.rematch(el[:-6], l) or retry
            if el.endswith(b" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(b" (glob)\n"):
                    l = l[:-8] + b"\n"
                return TTest.globmatch(el[:-8], l)
            if os.altsep and l.replace(b'\\', b'/') == el:
                return b'+glob'
        return retry

    @staticmethod
    def parsehghaveoutput(lines):
        '''Parse hghave log lines.

        Return tuple of lists (missing, failed):
          * the missing/unknown features
          * the features for which existence check failed'''
        missing = []
        failed = []
        for line in lines:
            if line.startswith(TTest.SKIPPED_PREFIX):
                line = line.splitlines()[0]
                missing.append(line[len(TTest.SKIPPED_PREFIX):])
            elif line.startswith(TTest.FAILED_PREFIX):
                line = line.splitlines()[0]
                failed.append(line[len(TTest.FAILED_PREFIX):])

        return missing, failed

    @staticmethod
    def _escapef(m):
        # Regex substitution callback: map one unprintable byte to \xNN.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Escape all unprintable bytes in s for safe .t-file storage.
        return TTest.ESCAPESUB(TTest._escapef, s)
1262 1262
# Global lock serializing writes to the shared output streams from
# concurrently running tests; reentrant so code already holding it may
# call helpers that acquire it again.
iolock = threading.RLock()
1264 1264
class SkipTest(Exception):
    """Raised to indicate that a test is to be skipped (not run at all)."""
1267 1267
class IgnoreTest(Exception):
    """Raised to indicate that a test is to be ignored (excluded from the
    run, e.g. by keyword or retest filtering)."""
1270 1270
class WarnTest(Exception):
    """Raised to indicate that a test completed with a warning (a
    warn-only output mismatch rather than a hard failure)."""
1273 1273
class ReportedTest(Exception):
    """Raised to indicate that a test's failure was already reported, so
    no further reporting is needed."""
1276 1276
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        # Per-test timing tuples, appended by stopTest().
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        """Record a failed test and emit its progress marker ('t' for
        timeout, '!' otherwise)."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\nERROR: %s output changed\n' % test)
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        """Record a passing test (also remembered for xunit reporting)."""
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record a test that raised an unexpected exception."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test; marker 's' in non-verbose mode."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        """Record an ignored test; marker 'i' unless the reason is one of
        the routine filters, in which case only the run count advances."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addWarn(self, test, reason):
        """Record a test that warned; marker '~' in non-verbose mode."""
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        with iolock:
            if self.showAll:
                self.stream.writeln('warned %s' % reason)
            else:
                self.stream.write('~')
            self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # Show the diff in an external viewer instead of inline.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    self.addFailure(
                        test,
                        'server failed to start (HGPORT=%s)' % test._startport)
                    raise ReportedTest('server failed to start')
                else:
                    self.stream.write('\n')
                    for line in lines:
                        if PYTHON3:
                            # The diff lines are bytes; write them through
                            # the underlying binary buffer on Python 3.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                    self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                self.stream.write('Accept this change? [n] ')
                answer = sys.stdin.readline().strip()
                if answer.lower() in ('y', 'yes'):
                    # Accepting replaces the reference output with the
                    # observed output.
                    if test.name.endswith('.t'):
                        rename(test.errpath, test.path)
                    else:
                        rename(test.errpath, '%s.out' % test.path)
                    accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        """Stamp the test with its start times for later reporting."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            self._firststarttime = test.started[4]

    def stopTest(self, test, interrupted=False):
        """Stamp stop times and record the test's timing tuple."""
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys  space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1452 1452
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test is how many times each test is queued.

        loadtest is a callable used to (re)load a test that sets
        should_reload; showchannels enables the per-job activity display.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # Hand out the test instance, reloading a fresh copy when
                # the test asks for it (needed for multiple runs per test).
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test.path, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One display slot per job for the --showchannels status line.
        channels = [""] * self._jobs

        def job(test, result):
            # Worker body: claim a free channel, run the test, and signal
            # completion (or propagate the error) through the done queue.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            try:
                channels[channel] = ''
            except IndexError:
                pass

        def stat():
            # Background thread that periodically renders one character of
            # each channel's name as a progress display.
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Wait for a completion when all job slots are busy or no
                # tests remain to be started.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            # NOTE(review): this reload passes test.name
                            # while get() above passes test.path;
                            # presumably these should agree for tests
                            # outside the current directory -- confirm.
                            tests.append(
                                self._loadtest(test.name, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(target=job, name=test.name,
                                             args=(test, result))
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying channels tells the stat thread to exit.
        channels = []

        return result
1636 1636
1637 1637 # Save the most recent 5 wall-clock runtimes of each test to a
1638 1638 # human-readable text file named .testtimes. Tests are sorted
1639 1639 # alphabetically, while times for each test are listed from oldest to
1640 1640 # newest.
1641 1641
def loadtimes(testdir):
    """Return previously saved per-test timings for *testdir*.

    Reads the ``.testtimes`` file written by savetimes() and returns a
    list of ``(testname, [runtimes...])`` tuples, oldest runtime first.
    A missing file simply means no history yet and yields an empty list;
    any other I/O error propagates.
    """
    times = []
    try:
        # savetimes() renames its temp file to '.testtimes' (no trailing
        # dash); opening '.testtimes-' here would never find saved data.
        with open(os.path.join(testdir, '.testtimes')) as fp:
            for line in fp:
                ts = line.split()
                times.append((ts[0], [float(t) for t in ts[1:]]))
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times
1653 1653
def savetimes(testdir, result):
    """Persist the most recent wall-clock runtimes from *result*.

    Merges result.times into the existing timing data (keeping at most
    the 5 newest runs per test, skipped tests excluded), then replaces
    ``.testtimes`` in *testdir* via write-to-temp-and-rename so a
    concurrent reader never sees a partial file.
    """
    saved = dict(loadtimes(testdir))
    maxruns = 5
    skipped = set([str(t[0]) for t in result.skipped])
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test not in skipped:
            ts = saved.setdefault(test, [])
            ts.append(real)
            ts[:] = ts[-maxruns:]

    fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
                                   dir=testdir, text=True)
    with os.fdopen(fd, 'w') as fp:
        # dict.iteritems() does not exist on Python 3; sorted(items())
        # behaves identically on Python 2 and keeps the file stable.
        for name, ts in sorted(saved.items()):
            fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
    timepath = os.path.join(testdir, '.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
1679 1679
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner, consulted for options and paths.
        self._runner = runner

    def run(self, test):
        """Execute *test*, print the summary, and emit optional xunit/json
        reports.  Returns the populated TestResult."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    self.stream.writeln('Skipped %s: %s' % (test.name, msg))
            for test, msg in result.warned:
                self.stream.writeln('Warned %s: %s' % (test.name, msg))
            for test, msg in result.failures:
                self.stream.writeln('Failed %s: %s' % (test.name, msg))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                # Write a JUnit/xunit-style XML report.
                with open(self._runner.options.xunit, 'wb') as xuf:
                    timesd = dict((t[0], t[3]) for t in result.times)
                    doc = minidom.Document()
                    s = doc.createElement('testsuite')
                    s.setAttribute('name', 'run-tests')
                    s.setAttribute('tests', str(result.testsRun))
                    s.setAttribute('errors', "0") # TODO
                    s.setAttribute('failures', str(failed))
                    s.setAttribute('skipped', str(skipped + ignored))
                    doc.appendChild(s)
                    for tc in result.successes:
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc.name)
                        t.setAttribute('time', '%.3f' % timesd[tc.name])
                        s.appendChild(t)
                    for tc, err in sorted(result.faildata.items()):
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc)
                        t.setAttribute('time', '%.3f' % timesd[tc])
                        # createCDATASection expects a unicode or it will
                        # convert using default conversion rules, which will
                        # fail if string isn't ASCII.
                        err = cdatasafe(err).decode('utf-8', 'replace')
                        cd = doc.createCDATASection(err)
                        t.appendChild(cd)
                        s.appendChild(t)
                    xuf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

            if self._runner.options.json:
                # Write a machine-readable per-test report.json.
                jsonpath = os.path.join(self._runner._testdir, 'report.json')
                with open(jsonpath, 'w') as fp:
                    timesd = {}
                    for tdata in result.times:
                        test = tdata[0]
                        timesd[test] = tdata[1:]

                    outcome = {}
                    groups = [('success', ((tc, None)
                               for tc in result.successes)),
                              ('failure', result.failures),
                              ('skip', result.skipped)]
                    for res, testcases in groups:
                        for tc, __ in testcases:
                            if tc.name in timesd:
                                tres = {'result': res,
                                        'time': ('%0.3f' % timesd[tc.name][2]),
                                        'cuser': ('%0.3f' % timesd[tc.name][0]),
                                        'csys': ('%0.3f' % timesd[tc.name][1]),
                                        'start': ('%0.3f' % timesd[tc.name][3]),
                                        'end': ('%0.3f' % timesd[tc.name][4]),
                                        'diff': result.faildata.get(tc.name,
                                                                    ''),
                                        }
                            else:
                                # blacklisted test
                                tres = {'result': res}

                            outcome[tc.name] = tres
                    jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                    fp.writelines(("testreport =", jsonout))

            self._runner._checkhglib('Tested')

            savetimes(self._runner._testdir, result)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d warned, %d failed.'
                % (result.testsRun,
                   skipped + ignored, warned, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

        return result

    def printtimes(self, times):
        """Print the per-test timing table, slowest-real-time last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
                            ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1800 1800
1801 1801 class TestRunner(object):
1802 1802 """Holds context for executing tests.
1803 1803
1804 1804 Tests rely on a lot of state. This object holds it for them.
1805 1805 """
1806 1806
1807 1807 # Programs required to run tests.
1808 1808 REQUIREDTOOLS = [
1809 1809 os.path.basename(_bytespath(sys.executable)),
1810 1810 b'diff',
1811 1811 b'grep',
1812 1812 b'unzip',
1813 1813 b'gunzip',
1814 1814 b'bunzip2',
1815 1815 b'sed',
1816 1816 ]
1817 1817
1818 1818 # Maps file extensions to test class.
1819 1819 TESTTYPES = [
1820 1820 (b'.py', PythonTest),
1821 1821 (b'.t', TTest),
1822 1822 ]
1823 1823
1824 1824 def __init__(self):
1825 1825 self.options = None
1826 1826 self._hgroot = None
1827 1827 self._testdir = None
1828 1828 self._hgtmp = None
1829 1829 self._installdir = None
1830 1830 self._bindir = None
1831 1831 self._tmpbinddir = None
1832 1832 self._pythondir = None
1833 1833 self._coveragefile = None
1834 1834 self._createdfiles = []
1835 1835 self._hgcommand = None
1836 1836 self._hgpath = None
1837 1837 self._chgsockdir = None
1838 1838 self._portoffset = 0
1839 1839 self._ports = {}
1840 1840
1841 1841 def run(self, args, parser=None):
1842 1842 """Run the test suite."""
1843 1843 oldmask = os.umask(0o22)
1844 1844 try:
1845 1845 parser = parser or getparser()
1846 1846 options, args = parseargs(args, parser)
1847 1847 # positional arguments are paths to test files to run, so
1848 1848 # we make sure they're all bytestrings
1849 1849 args = [_bytespath(a) for a in args]
1850 1850 self.options = options
1851 1851
1852 1852 self._checktools()
1853 1853 tests = self.findtests(args)
1854 1854 if options.profile_runner:
1855 1855 import statprof
1856 1856 statprof.start()
1857 1857 result = self._run(tests)
1858 1858 if options.profile_runner:
1859 1859 statprof.stop()
1860 1860 statprof.display()
1861 1861 return result
1862 1862
1863 1863 finally:
1864 1864 os.umask(oldmask)
1865 1865
1866 1866 def _run(self, tests):
1867 1867 if self.options.random:
1868 1868 random.shuffle(tests)
1869 1869 else:
1870 1870 # keywords for slow tests
1871 1871 slow = {b'svn': 10,
1872 1872 b'cvs': 10,
1873 1873 b'hghave': 10,
1874 1874 b'largefiles-update': 10,
1875 1875 b'run-tests': 10,
1876 1876 b'corruption': 10,
1877 1877 b'race': 10,
1878 1878 b'i18n': 10,
1879 1879 b'check': 100,
1880 1880 b'gendoc': 100,
1881 1881 b'contrib-perf': 200,
1882 1882 }
1883 1883 perf = {}
1884 1884 def sortkey(f):
1885 1885 # run largest tests first, as they tend to take the longest
1886 1886 try:
1887 1887 return perf[f]
1888 1888 except KeyError:
1889 1889 try:
1890 1890 val = -os.stat(f).st_size
1891 1891 except OSError as e:
1892 1892 if e.errno != errno.ENOENT:
1893 1893 raise
1894 1894 perf[f] = -1e9 # file does not exist, tell early
1895 1895 return -1e9
1896 1896 for kw, mul in slow.items():
1897 1897 if kw in f:
1898 1898 val *= mul
1899 1899 if f.endswith(b'.py'):
1900 1900 val /= 10.0
1901 1901 perf[f] = val / 1000.0
1902 1902 return perf[f]
1903 1903 tests.sort(key=sortkey)
1904 1904
1905 1905 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1906 1906 os, 'getcwdb', os.getcwd)()
1907 1907
1908 1908 if 'PYTHONHASHSEED' not in os.environ:
1909 1909 # use a random python hash seed all the time
1910 1910 # we do the randomness ourself to know what seed is used
1911 1911 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1912 1912
1913 1913 if self.options.tmpdir:
1914 1914 self.options.keep_tmpdir = True
1915 1915 tmpdir = _bytespath(self.options.tmpdir)
1916 1916 if os.path.exists(tmpdir):
1917 1917 # Meaning of tmpdir has changed since 1.3: we used to create
1918 1918 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1919 1919 # tmpdir already exists.
1920 1920 print("error: temp dir %r already exists" % tmpdir)
1921 1921 return 1
1922 1922
1923 1923 # Automatically removing tmpdir sounds convenient, but could
1924 1924 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1925 1925 # or "--tmpdir=$HOME".
1926 1926 #vlog("# Removing temp dir", tmpdir)
1927 1927 #shutil.rmtree(tmpdir)
1928 1928 os.makedirs(tmpdir)
1929 1929 else:
1930 1930 d = None
1931 1931 if os.name == 'nt':
1932 1932 # without this, we get the default temp dir location, but
1933 1933 # in all lowercase, which causes troubles with paths (issue3490)
1934 1934 d = osenvironb.get(b'TMP', None)
1935 1935 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1936 1936
1937 1937 self._hgtmp = osenvironb[b'HGTMP'] = (
1938 1938 os.path.realpath(tmpdir))
1939 1939
1940 1940 if self.options.with_hg:
1941 1941 self._installdir = None
1942 1942 whg = self.options.with_hg
1943 1943 self._bindir = os.path.dirname(os.path.realpath(whg))
1944 1944 assert isinstance(self._bindir, bytes)
1945 1945 self._hgcommand = os.path.basename(whg)
1946 1946 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1947 1947 os.makedirs(self._tmpbindir)
1948 1948
1949 1949 # This looks redundant with how Python initializes sys.path from
1950 1950 # the location of the script being executed. Needed because the
1951 1951 # "hg" specified by --with-hg is not the only Python script
1952 1952 # executed in the test suite that needs to import 'mercurial'
1953 1953 # ... which means it's not really redundant at all.
1954 1954 self._pythondir = self._bindir
1955 1955 else:
1956 1956 self._installdir = os.path.join(self._hgtmp, b"install")
1957 1957 self._bindir = os.path.join(self._installdir, b"bin")
1958 1958 self._hgcommand = b'hg'
1959 1959 self._tmpbindir = self._bindir
1960 1960 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1961 1961
1962 1962 # set up crafted chg environment, then replace "hg" command by "chg"
1963 1963 chgbindir = self._bindir
1964 1964 if self.options.chg or self.options.with_chg:
1965 1965 self._chgsockdir = d = os.path.join(self._hgtmp, b'chgsock')
1966 1966 os.mkdir(d)
1967 1967 osenvironb[b'CHGSOCKNAME'] = os.path.join(d, b"server")
1968 1968 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
1969 1969 if self.options.chg:
1970 1970 self._hgcommand = b'chg'
1971 1971 elif self.options.with_chg:
1972 1972 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
1973 1973 self._hgcommand = os.path.basename(self.options.with_chg)
1974 1974
1975 1975 osenvironb[b"BINDIR"] = self._bindir
1976 1976 osenvironb[b"PYTHON"] = PYTHON
1977 1977
1978 1978 fileb = _bytespath(__file__)
1979 1979 runtestdir = os.path.abspath(os.path.dirname(fileb))
1980 1980 osenvironb[b'RUNTESTDIR'] = runtestdir
1981 1981 if PYTHON3:
1982 1982 sepb = _bytespath(os.pathsep)
1983 1983 else:
1984 1984 sepb = os.pathsep
1985 1985 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1986 1986 if os.path.islink(__file__):
1987 1987 # test helper will likely be at the end of the symlink
1988 1988 realfile = os.path.realpath(fileb)
1989 1989 realdir = os.path.abspath(os.path.dirname(realfile))
1990 1990 path.insert(2, realdir)
1991 1991 if chgbindir != self._bindir:
1992 1992 path.insert(1, chgbindir)
1993 1993 if self._testdir != runtestdir:
1994 1994 path = [self._testdir] + path
1995 1995 if self._tmpbindir != self._bindir:
1996 1996 path = [self._tmpbindir] + path
1997 1997 osenvironb[b"PATH"] = sepb.join(path)
1998 1998
1999 1999 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2000 2000 # can run .../tests/run-tests.py test-foo where test-foo
2001 2001 # adds an extension to HGRC. Also include run-test.py directory to
2002 2002 # import modules like heredoctest.
2003 2003 pypath = [self._pythondir, self._testdir, runtestdir]
2004 2004 # We have to augment PYTHONPATH, rather than simply replacing
2005 2005 # it, in case external libraries are only available via current
2006 2006 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2007 2007 # are in /opt/subversion.)
2008 2008 oldpypath = osenvironb.get(IMPL_PATH)
2009 2009 if oldpypath:
2010 2010 pypath.append(oldpypath)
2011 2011 osenvironb[IMPL_PATH] = sepb.join(pypath)
2012 2012
2013 2013 if self.options.pure:
2014 2014 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2015 2015
2016 2016 if self.options.allow_slow_tests:
2017 2017 os.environ["HGTEST_SLOW"] = "slow"
2018 2018 elif 'HGTEST_SLOW' in os.environ:
2019 2019 del os.environ['HGTEST_SLOW']
2020 2020
2021 2021 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2022 2022
2023 2023 vlog("# Using TESTDIR", self._testdir)
2024 2024 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2025 2025 vlog("# Using HGTMP", self._hgtmp)
2026 2026 vlog("# Using PATH", os.environ["PATH"])
2027 2027 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2028 2028
2029 2029 try:
2030 2030 return self._runtests(tests) or 0
2031 2031 finally:
2032 2032 time.sleep(.1)
2033 2033 self._cleanup()
2034 2034
2035 2035 def findtests(self, args):
2036 2036 """Finds possible test files from arguments.
2037 2037
2038 2038 If you wish to inject custom tests into the test harness, this would
2039 2039 be a good function to monkeypatch or override in a derived class.
2040 2040 """
2041 2041 if not args:
2042 2042 if self.options.changed:
2043 2043 proc = Popen4('hg st --rev "%s" -man0 .' %
2044 2044 self.options.changed, None, 0)
2045 2045 stdout, stderr = proc.communicate()
2046 2046 args = stdout.strip(b'\0').split(b'\0')
2047 2047 else:
2048 2048 args = os.listdir(b'.')
2049 2049
2050 2050 return [t for t in args
2051 2051 if os.path.basename(t).startswith(b'test-')
2052 2052 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2053 2053
2054 2054 def _runtests(self, tests):
2055 2055 try:
2056 2056 if self._installdir:
2057 2057 self._installhg()
2058 2058 self._checkhglib("Testing")
2059 2059 else:
2060 2060 self._usecorrectpython()
2061 2061 if self.options.chg:
2062 2062 assert self._installdir
2063 2063 self._installchg()
2064 2064
2065 2065 if self.options.restart:
2066 2066 orig = list(tests)
2067 2067 while tests:
2068 2068 if os.path.exists(tests[0] + ".err"):
2069 2069 break
2070 2070 tests.pop(0)
2071 2071 if not tests:
2072 2072 print("running all tests")
2073 2073 tests = orig
2074 2074
2075 2075 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2076 2076
2077 2077 failed = False
2078 2078 warned = False
2079 2079 kws = self.options.keywords
2080 2080 if kws is not None and PYTHON3:
2081 2081 kws = kws.encode('utf-8')
2082 2082
2083 2083 suite = TestSuite(self._testdir,
2084 2084 jobs=self.options.jobs,
2085 2085 whitelist=self.options.whitelisted,
2086 2086 blacklist=self.options.blacklist,
2087 2087 retest=self.options.retest,
2088 2088 keywords=kws,
2089 2089 loop=self.options.loop,
2090 2090 runs_per_test=self.options.runs_per_test,
2091 2091 showchannels=self.options.showchannels,
2092 2092 tests=tests, loadtest=self._gettest)
2093 2093 verbosity = 1
2094 2094 if self.options.verbose:
2095 2095 verbosity = 2
2096 2096 runner = TextTestRunner(self, verbosity=verbosity)
2097 2097 result = runner.run(suite)
2098 2098
2099 2099 if result.failures:
2100 2100 failed = True
2101 2101 if result.warned:
2102 2102 warned = True
2103 2103
2104 2104 if self.options.anycoverage:
2105 2105 self._outputcoverage()
2106 2106 except KeyboardInterrupt:
2107 2107 failed = True
2108 2108 print("\ninterrupted!")
2109 2109
2110 2110 if failed:
2111 2111 return 1
2112 2112 if warned:
2113 2113 return 80
2114 2114
2115 2115 def _getport(self, count):
2116 2116 port = self._ports.get(count) # do we have a cached entry?
2117 2117 if port is None:
2118 2118 portneeded = 3
2119 2119 # above 100 tries we just give up and let test reports failure
2120 2120 for tries in xrange(100):
2121 2121 allfree = True
2122 2122 port = self.options.port + self._portoffset
2123 2123 for idx in xrange(portneeded):
2124 2124 if not checkportisavailable(port + idx):
2125 2125 allfree = False
2126 2126 break
2127 2127 self._portoffset += portneeded
2128 2128 if allfree:
2129 2129 break
2130 2130 self._ports[count] = port
2131 2131 return port
2132 2132
2133 2133 def _gettest(self, test, count):
2134 2134 """Obtain a Test by looking at its filename.
2135 2135
2136 2136 Returns a Test instance. The Test may not be runnable if it doesn't
2137 2137 map to a known type.
2138 2138 """
2139 2139 lctest = test.lower()
2140 2140 testcls = Test
2141 2141
2142 2142 for ext, cls in self.TESTTYPES:
2143 2143 if lctest.endswith(ext):
2144 2144 testcls = cls
2145 2145 break
2146 2146
2147 2147 refpath = os.path.join(self._testdir, test)
2148 2148 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2149 2149
2150 2150 t = testcls(refpath, tmpdir,
2151 2151 keeptmpdir=self.options.keep_tmpdir,
2152 2152 debug=self.options.debug,
2153 2153 timeout=self.options.timeout,
2154 2154 startport=self._getport(count),
2155 2155 extraconfigopts=self.options.extra_config_opt,
2156 2156 py3kwarnings=self.options.py3k_warnings,
2157 2157 shell=self.options.shell,
2158 2158 hgcommand=self._hgcommand)
2159 2159 t.should_reload = True
2160 2160 return t
2161 2161
2162 2162 def _cleanup(self):
2163 2163 """Clean up state from this test invocation."""
2164 2164 if self._chgsockdir:
2165 2165 self._killchgdaemons()
2166 2166
2167 2167 if self.options.keep_tmpdir:
2168 2168 return
2169 2169
2170 2170 vlog("# Cleaning up HGTMP", self._hgtmp)
2171 2171 shutil.rmtree(self._hgtmp, True)
2172 2172 for f in self._createdfiles:
2173 2173 try:
2174 2174 os.remove(f)
2175 2175 except OSError:
2176 2176 pass
2177 2177
2178 2178 def _usecorrectpython(self):
2179 2179 """Configure the environment to use the appropriate Python in tests."""
2180 2180 # Tests must use the same interpreter as us or bad things will happen.
2181 2181 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2182 2182 if getattr(os, 'symlink', None):
2183 2183 vlog("# Making python executable in test path a symlink to '%s'" %
2184 2184 sys.executable)
2185 2185 mypython = os.path.join(self._tmpbindir, pyexename)
2186 2186 try:
2187 2187 if os.readlink(mypython) == sys.executable:
2188 2188 return
2189 2189 os.unlink(mypython)
2190 2190 except OSError as err:
2191 2191 if err.errno != errno.ENOENT:
2192 2192 raise
2193 2193 if self._findprogram(pyexename) != sys.executable:
2194 2194 try:
2195 2195 os.symlink(sys.executable, mypython)
2196 2196 self._createdfiles.append(mypython)
2197 2197 except OSError as err:
2198 2198 # child processes may race, which is harmless
2199 2199 if err.errno != errno.EEXIST:
2200 2200 raise
2201 2201 else:
2202 2202 exedir, exename = os.path.split(sys.executable)
2203 2203 vlog("# Modifying search path to find %s as %s in '%s'" %
2204 2204 (exename, pyexename, exedir))
2205 2205 path = os.environ['PATH'].split(os.pathsep)
2206 2206 while exedir in path:
2207 2207 path.remove(exedir)
2208 2208 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2209 2209 if not self._findprogram(pyexename):
2210 2210 print("WARNING: Cannot find %s in search path" % pyexename)
2211 2211
2212 2212 def _installhg(self):
2213 2213 """Install hg into the test environment.
2214 2214
2215 2215 This will also configure hg with the appropriate testing settings.
2216 2216 """
2217 2217 vlog("# Performing temporary installation of HG")
2218 2218 installerrs = os.path.join(b"tests", b"install.err")
2219 2219 compiler = ''
2220 2220 if self.options.compiler:
2221 2221 compiler = '--compiler ' + self.options.compiler
2222 2222 if self.options.pure:
2223 2223 pure = b"--pure"
2224 2224 else:
2225 2225 pure = b""
2226 2226 py3 = ''
2227 2227
2228 2228 # Run installer in hg root
2229 2229 script = os.path.realpath(sys.argv[0])
2230 2230 exe = sys.executable
2231 2231 if PYTHON3:
2232 2232 py3 = b'--c2to3'
2233 2233 compiler = _bytespath(compiler)
2234 2234 script = _bytespath(script)
2235 2235 exe = _bytespath(exe)
2236 2236 hgroot = os.path.dirname(os.path.dirname(script))
2237 2237 self._hgroot = hgroot
2238 2238 os.chdir(hgroot)
2239 2239 nohome = b'--home=""'
2240 2240 if os.name == 'nt':
2241 2241 # The --home="" trick works only on OS where os.sep == '/'
2242 2242 # because of a distutils convert_path() fast-path. Avoid it at
2243 2243 # least on Windows for now, deal with .pydistutils.cfg bugs
2244 2244 # when they happen.
2245 2245 nohome = b''
2246 2246 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2247 2247 b' build %(compiler)s --build-base="%(base)s"'
2248 2248 b' install --force --prefix="%(prefix)s"'
2249 2249 b' --install-lib="%(libdir)s"'
2250 2250 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2251 2251 % {b'exe': exe, b'py3': py3, b'pure': pure,
2252 2252 b'compiler': compiler,
2253 2253 b'base': os.path.join(self._hgtmp, b"build"),
2254 2254 b'prefix': self._installdir, b'libdir': self._pythondir,
2255 2255 b'bindir': self._bindir,
2256 2256 b'nohome': nohome, b'logfile': installerrs})
2257 2257
2258 2258 # setuptools requires install directories to exist.
2259 2259 def makedirs(p):
2260 2260 try:
2261 2261 os.makedirs(p)
2262 2262 except OSError as e:
2263 2263 if e.errno != errno.EEXIST:
2264 2264 raise
2265 2265 makedirs(self._pythondir)
2266 2266 makedirs(self._bindir)
2267 2267
2268 2268 vlog("# Running", cmd)
2269 2269 if os.system(cmd) == 0:
2270 2270 if not self.options.verbose:
2271 2271 try:
2272 2272 os.remove(installerrs)
2273 2273 except OSError as e:
2274 2274 if e.errno != errno.ENOENT:
2275 2275 raise
2276 2276 else:
2277 2277 f = open(installerrs, 'rb')
2278 2278 for line in f:
2279 2279 if PYTHON3:
2280 2280 sys.stdout.buffer.write(line)
2281 2281 else:
2282 2282 sys.stdout.write(line)
2283 2283 f.close()
2284 2284 sys.exit(1)
2285 2285 os.chdir(self._testdir)
2286 2286
2287 2287 self._usecorrectpython()
2288 2288
2289 2289 if self.options.py3k_warnings and not self.options.anycoverage:
2290 2290 vlog("# Updating hg command to enable Py3k Warnings switch")
2291 2291 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2292 2292 lines = [line.rstrip() for line in f]
2293 2293 lines[0] += ' -3'
2294 2294 f.close()
2295 2295 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2296 2296 for line in lines:
2297 2297 f.write(line + '\n')
2298 2298 f.close()
2299 2299
2300 2300 hgbat = os.path.join(self._bindir, b'hg.bat')
2301 2301 if os.path.isfile(hgbat):
2302 2302 # hg.bat expects to be put in bin/scripts while run-tests.py
2303 2303 # installation layout put it in bin/ directly. Fix it
2304 2304 f = open(hgbat, 'rb')
2305 2305 data = f.read()
2306 2306 f.close()
2307 2307 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2308 2308 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2309 2309 b'"%~dp0python" "%~dp0hg" %*')
2310 2310 f = open(hgbat, 'wb')
2311 2311 f.write(data)
2312 2312 f.close()
2313 2313 else:
2314 2314 print('WARNING: cannot fix hg.bat reference to python.exe')
2315 2315
2316 2316 if self.options.anycoverage:
2317 2317 custom = os.path.join(self._testdir, 'sitecustomize.py')
2318 2318 target = os.path.join(self._pythondir, 'sitecustomize.py')
2319 2319 vlog('# Installing coverage trigger to %s' % target)
2320 2320 shutil.copyfile(custom, target)
2321 2321 rc = os.path.join(self._testdir, '.coveragerc')
2322 2322 vlog('# Installing coverage rc to %s' % rc)
2323 2323 os.environ['COVERAGE_PROCESS_START'] = rc
2324 2324 covdir = os.path.join(self._installdir, '..', 'coverage')
2325 2325 try:
2326 2326 os.mkdir(covdir)
2327 2327 except OSError as e:
2328 2328 if e.errno != errno.EEXIST:
2329 2329 raise
2330 2330
2331 2331 os.environ['COVERAGE_DIR'] = covdir
2332 2332
2333 2333 def _checkhglib(self, verb):
2334 2334 """Ensure that the 'mercurial' package imported by python is
2335 2335 the one we expect it to be. If not, print a warning to stderr."""
2336 2336 if ((self._bindir == self._pythondir) and
2337 2337 (self._bindir != self._tmpbindir)):
2338 2338 # The pythondir has been inferred from --with-hg flag.
2339 2339 # We cannot expect anything sensible here.
2340 2340 return
2341 2341 expecthg = os.path.join(self._pythondir, b'mercurial')
2342 2342 actualhg = self._gethgpath()
2343 2343 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2344 2344 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2345 2345 ' (expected %s)\n'
2346 2346 % (verb, actualhg, expecthg))
2347 2347 def _gethgpath(self):
2348 2348 """Return the path to the mercurial package that is actually found by
2349 2349 the current Python interpreter."""
2350 2350 if self._hgpath is not None:
2351 2351 return self._hgpath
2352 2352
2353 2353 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2354 2354 cmd = cmd % PYTHON
2355 2355 if PYTHON3:
2356 2356 cmd = _strpath(cmd)
2357 2357 pipe = os.popen(cmd)
2358 2358 try:
2359 2359 self._hgpath = _bytespath(pipe.read().strip())
2360 2360 finally:
2361 2361 pipe.close()
2362 2362
2363 2363 return self._hgpath
2364 2364
2365 2365 def _installchg(self):
2366 2366 """Install chg into the test environment"""
2367 2367 vlog('# Performing temporary installation of CHG')
2368 2368 assert os.path.dirname(self._bindir) == self._installdir
2369 2369 assert self._hgroot, 'must be called after _installhg()'
2370 2370 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2371 2371 % {b'make': 'make', # TODO: switch by option or environment?
2372 2372 b'prefix': self._installdir})
2373 2373 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2374 2374 vlog("# Running", cmd)
2375 2375 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2376 2376 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2377 2377 stderr=subprocess.STDOUT)
2378 2378 out, _err = proc.communicate()
2379 2379 if proc.returncode != 0:
2380 2380 if PYTHON3:
2381 2381 sys.stdout.buffer.write(out)
2382 2382 else:
2383 2383 sys.stdout.write(out)
2384 2384 sys.exit(1)
2385 2385
2386 2386 def _killchgdaemons(self):
2387 2387 """Kill all background chg command servers spawned by tests"""
2388 2388 for f in os.listdir(self._chgsockdir):
2389 2389 if not f.endswith(b'.pid'):
2390 2390 continue
2391 2391 killdaemons(os.path.join(self._chgsockdir, f))
2392 2392
2393 2393 def _outputcoverage(self):
2394 2394 """Produce code coverage output."""
2395 2395 from coverage import coverage
2396 2396
2397 2397 vlog('# Producing coverage report')
2398 2398 # chdir is the easiest way to get short, relative paths in the
2399 2399 # output.
2400 2400 os.chdir(self._hgroot)
2401 2401 covdir = os.path.join(self._installdir, '..', 'coverage')
2402 2402 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2403 2403
2404 2404 # Map install directory paths back to source directory.
2405 2405 cov.config.paths['srcdir'] = ['.', self._pythondir]
2406 2406
2407 2407 cov.combine()
2408 2408
2409 2409 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2410 2410 cov.report(ignore_errors=True, omit=omit)
2411 2411
2412 2412 if self.options.htmlcov:
2413 2413 htmldir = os.path.join(self._testdir, 'htmlcov')
2414 2414 cov.html_report(directory=htmldir, omit=omit)
2415 2415 if self.options.annotate:
2416 2416 adir = os.path.join(self._testdir, 'annotated')
2417 2417 if not os.path.isdir(adir):
2418 2418 os.mkdir(adir)
2419 2419 cov.annotate(directory=adir, omit=omit)
2420 2420
2421 2421 def _findprogram(self, program):
2422 2422 """Search PATH for a executable program"""
2423 2423 dpb = _bytespath(os.defpath)
2424 2424 sepb = _bytespath(os.pathsep)
2425 2425 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2426 2426 name = os.path.join(p, program)
2427 2427 if os.name == 'nt' or os.access(name, os.X_OK):
2428 2428 return name
2429 2429 return None
2430 2430
2431 2431 def _checktools(self):
2432 2432 """Ensure tools required to run tests are present."""
2433 2433 for p in self.REQUIREDTOOLS:
2434 2434 if os.name == 'nt' and not p.endswith('.exe'):
2435 2435 p += '.exe'
2436 2436 found = self._findprogram(p)
2437 2437 if found:
2438 2438 vlog("# Found prerequisite", p, "at", found)
2439 2439 else:
2440 2440 print("WARNING: Did not find prerequisite tool: %s " % p)
2441 2441
2442 2442 if __name__ == '__main__':
2443 2443 runner = TestRunner()
2444 2444
2445 2445 try:
2446 2446 import msvcrt
2447 2447 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2448 2448 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2449 2449 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2450 2450 except ImportError:
2451 2451 pass
2452 2452
2453 2453 sys.exit(runner.run(sys.argv[1:]))
@@ -1,720 +1,730 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test with install
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 Define a helper to avoid the install step
18 18 =============
19 19 $ rt()
20 20 > {
21 21 > run-tests.py --with-hg=`which hg` "$@"
22 22 > }
23 23
24 24 error paths
25 25
26 26 #if symlink
27 27 $ ln -s `which true` hg
28 28 $ run-tests.py --with-hg=./hg
29 29 warning: --with-hg should specify an hg script
30 30
31 31 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
32 32 $ rm hg
33 33 #endif
34 34
35 35 #if execbit
36 36 $ touch hg
37 37 $ run-tests.py --with-hg=./hg
38 38 Usage: run-tests.py [options] [tests]
39 39
40 40 run-tests.py: error: --with-hg must specify an executable hg script
41 41 [2]
42 42 $ rm hg
43 43 #endif
44 44
45 45 a succesful test
46 46 =======================
47 47
48 48 $ cat > test-success.t << EOF
49 49 > $ echo babar
50 50 > babar
51 51 > $ echo xyzzy
52 52 > never happens (?)
53 53 > xyzzy
54 54 > nor this (?)
55 55 > EOF
56 56
57 57 $ rt
58 58 .
59 59 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
60 60
61 61 failing test
62 62 ==================
63 63
64 64 $ cat > test-failure.t << EOF
65 65 > $ echo babar
66 66 > rataxes
67 67 > This is a noop statement so that
68 68 > this test is still more bytes than success.
69 69 > EOF
70 70
71 71 >>> fh = open('test-failure-unicode.t', 'wb')
72 72 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
73 73 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
74 74
75 75 $ rt
76 76
77 77 --- $TESTTMP/test-failure.t
78 78 +++ $TESTTMP/test-failure.t.err
79 79 @@ -1,4 +1,4 @@
80 80 $ echo babar
81 81 - rataxes
82 82 + babar
83 83 This is a noop statement so that
84 84 this test is still more bytes than success.
85 85
86 86 ERROR: test-failure.t output changed
87 87 !.
88 88 --- $TESTTMP/test-failure-unicode.t
89 89 +++ $TESTTMP/test-failure-unicode.t.err
90 90 @@ -1,2 +1,2 @@
91 91 $ echo babar\xce\xb1 (esc)
92 92 - l\xce\xb5\xce\xb5t (esc)
93 93 + babar\xce\xb1 (esc)
94 94
95 95 ERROR: test-failure-unicode.t output changed
96 96 !
97 97 Failed test-failure.t: output changed
98 98 Failed test-failure-unicode.t: output changed
99 99 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
100 100 python hash seed: * (glob)
101 101 [1]
102 102
103 103 test --xunit support
104 104 $ rt --xunit=xunit.xml
105 105
106 106 --- $TESTTMP/test-failure.t
107 107 +++ $TESTTMP/test-failure.t.err
108 108 @@ -1,4 +1,4 @@
109 109 $ echo babar
110 110 - rataxes
111 111 + babar
112 112 This is a noop statement so that
113 113 this test is still more bytes than success.
114 114
115 115 ERROR: test-failure.t output changed
116 116 !.
117 117 --- $TESTTMP/test-failure-unicode.t
118 118 +++ $TESTTMP/test-failure-unicode.t.err
119 119 @@ -1,2 +1,2 @@
120 120 $ echo babar\xce\xb1 (esc)
121 121 - l\xce\xb5\xce\xb5t (esc)
122 122 + babar\xce\xb1 (esc)
123 123
124 124 ERROR: test-failure-unicode.t output changed
125 125 !
126 126 Failed test-failure.t: output changed
127 127 Failed test-failure-unicode.t: output changed
128 128 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
129 129 python hash seed: * (glob)
130 130 [1]
131 131 $ cat xunit.xml
132 132 <?xml version="1.0" encoding="utf-8"?>
133 133 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
134 134 <testcase name="test-success.t" time="*"/> (glob)
135 135 <testcase name="test-failure-unicode.t" time="*"> (glob)
136 136 <![CDATA[--- $TESTTMP/test-failure-unicode.t
137 137 +++ $TESTTMP/test-failure-unicode.t.err
138 138 @@ -1,2 +1,2 @@
139 139 $ echo babar\xce\xb1 (esc)
140 140 - l\xce\xb5\xce\xb5t (esc)
141 141 + babar\xce\xb1 (esc)
142 142 ]]> </testcase>
143 143 <testcase name="test-failure.t" time="*"> (glob)
144 144 <![CDATA[--- $TESTTMP/test-failure.t
145 145 +++ $TESTTMP/test-failure.t.err
146 146 @@ -1,4 +1,4 @@
147 147 $ echo babar
148 148 - rataxes
149 149 + babar
150 150 This is a noop statement so that
151 151 this test is still more bytes than success.
152 152 ]]> </testcase>
153 153 </testsuite>
154 154
155 155 $ rm test-failure-unicode.t
156 156
157 157 test for --retest
158 158 ====================
159 159
160 160 $ rt --retest
161 161
162 162 --- $TESTTMP/test-failure.t
163 163 +++ $TESTTMP/test-failure.t.err
164 164 @@ -1,4 +1,4 @@
165 165 $ echo babar
166 166 - rataxes
167 167 + babar
168 168 This is a noop statement so that
169 169 this test is still more bytes than success.
170 170
171 171 ERROR: test-failure.t output changed
172 172 !
173 173 Failed test-failure.t: output changed
174 174 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
175 175 python hash seed: * (glob)
176 176 [1]
177 177
178 178 Selecting Tests To Run
179 179 ======================
180 180
181 181 successful
182 182
183 183 $ rt test-success.t
184 184 .
185 185 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
186 186
187 187 success w/ keyword
188 188 $ rt -k xyzzy
189 189 .
190 190 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
191 191
192 192 failed
193 193
194 194 $ rt test-failure.t
195 195
196 196 --- $TESTTMP/test-failure.t
197 197 +++ $TESTTMP/test-failure.t.err
198 198 @@ -1,4 +1,4 @@
199 199 $ echo babar
200 200 - rataxes
201 201 + babar
202 202 This is a noop statement so that
203 203 this test is still more bytes than success.
204 204
205 205 ERROR: test-failure.t output changed
206 206 !
207 207 Failed test-failure.t: output changed
208 208 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
209 209 python hash seed: * (glob)
210 210 [1]
211 211
212 212 failure w/ keyword
213 213 $ rt -k rataxes
214 214
215 215 --- $TESTTMP/test-failure.t
216 216 +++ $TESTTMP/test-failure.t.err
217 217 @@ -1,4 +1,4 @@
218 218 $ echo babar
219 219 - rataxes
220 220 + babar
221 221 This is a noop statement so that
222 222 this test is still more bytes than success.
223 223
224 224 ERROR: test-failure.t output changed
225 225 !
226 226 Failed test-failure.t: output changed
227 227 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
228 228 python hash seed: * (glob)
229 229 [1]
230 230
231 231 Verify that when a process fails to start we show a useful message
232 232 ==================================================================
233 233
234 234 $ cat > test-serve-fail.t <<EOF
235 235 > $ echo 'abort: child process failed to start blah'
236 236 > EOF
237 237 $ rt test-serve-fail.t
238 238
239 239 ERROR: test-serve-fail.t output changed
240 240 !
241 241 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
242 242 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
243 243 python hash seed: * (glob)
244 244 [1]
245 245 $ rm test-serve-fail.t
246 246
247 247 Verify that we can try other ports
248 248 ===================================
249 249 $ hg init inuse
250 250 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
251 251 $ cat blocks.pid >> $DAEMON_PIDS
252 252 $ cat > test-serve-inuse.t <<EOF
253 253 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
254 254 > $ cat hg.pid >> \$DAEMON_PIDS
255 255 > EOF
256 256 $ rt test-serve-inuse.t
257 257 .
258 258 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
259 259 $ rm test-serve-inuse.t
260 260
261 261 Running In Debug Mode
262 262 ======================
263 263
264 264 $ rt --debug 2>&1 | grep -v pwd
265 265 + echo *SALT* 0 0 (glob)
266 266 *SALT* 0 0 (glob)
267 267 + echo babar
268 268 babar
269 269 + echo *SALT* 4 0 (glob)
270 270 *SALT* 4 0 (glob)
271 271 *+ echo *SALT* 0 0 (glob)
272 272 *SALT* 0 0 (glob)
273 273 + echo babar
274 274 babar
275 275 + echo *SALT* 2 0 (glob)
276 276 *SALT* 2 0 (glob)
277 277 + echo xyzzy
278 278 xyzzy
279 279 + echo *SALT* 6 0 (glob)
280 280 *SALT* 6 0 (glob)
281 281 .
282 282 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
283 283
284 284 Parallel runs
285 285 ==============
286 286
287 287 (duplicate the failing test to get predictable output)
288 288 $ cp test-failure.t test-failure-copy.t
289 289
290 290 $ rt --jobs 2 test-failure*.t -n
291 291 !!
292 292 Failed test-failure*.t: output changed (glob)
293 293 Failed test-failure*.t: output changed (glob)
294 294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
295 295 python hash seed: * (glob)
296 296 [1]
297 297
298 298 failures in parallel with --first should only print one failure
299 299 >>> f = open('test-nothing.t', 'w')
300 300 >>> f.write('foo\n' * 1024) and None
301 301 >>> f.write(' $ sleep 1') and None
302 302 $ rt --jobs 2 --first
303 303
304 304 --- $TESTTMP/test-failure*.t (glob)
305 305 +++ $TESTTMP/test-failure*.t.err (glob)
306 306 @@ -1,4 +1,4 @@
307 307 $ echo babar
308 308 - rataxes
309 309 + babar
310 310 This is a noop statement so that
311 311 this test is still more bytes than success.
312 312
313 313 Failed test-failure*.t: output changed (glob)
314 314 Failed test-nothing.t: output changed
315 315 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
316 316 python hash seed: * (glob)
317 317 [1]
318 318
319 319
320 320 (delete the duplicated test file)
321 321 $ rm test-failure-copy.t test-nothing.t
322 322
323 323
324 324 Interactive run
325 325 ===============
326 326
327 327 (backup the failing test)
328 328 $ cp test-failure.t backup
329 329
330 330 Refuse the fix
331 331
332 332 $ echo 'n' | rt -i
333 333
334 334 --- $TESTTMP/test-failure.t
335 335 +++ $TESTTMP/test-failure.t.err
336 336 @@ -1,4 +1,4 @@
337 337 $ echo babar
338 338 - rataxes
339 339 + babar
340 340 This is a noop statement so that
341 341 this test is still more bytes than success.
342 342 Accept this change? [n]
343 343 ERROR: test-failure.t output changed
344 344 !.
345 345 Failed test-failure.t: output changed
346 346 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
347 347 python hash seed: * (glob)
348 348 [1]
349 349
350 350 $ cat test-failure.t
351 351 $ echo babar
352 352 rataxes
353 353 This is a noop statement so that
354 354 this test is still more bytes than success.
355 355
356 356 Interactive with custom view
357 357
358 358 $ echo 'n' | rt -i --view echo
359 359 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
360 360 Accept this change? [n]* (glob)
361 361 ERROR: test-failure.t output changed
362 362 !.
363 363 Failed test-failure.t: output changed
364 364 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
365 365 python hash seed: * (glob)
366 366 [1]
367 367
368 368 View the fix
369 369
370 370 $ echo 'y' | rt --view echo
371 371 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
372 372
373 373 ERROR: test-failure.t output changed
374 374 !.
375 375 Failed test-failure.t: output changed
376 376 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
377 377 python hash seed: * (glob)
378 378 [1]
379 379
380 380 Accept the fix
381 381
382 382 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
383 383 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
384 384 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
385 385 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
386 386 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
387 387 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
388 388 $ echo 'y' | rt -i 2>&1
389 389
390 390 --- $TESTTMP/test-failure.t
391 391 +++ $TESTTMP/test-failure.t.err
392 392 @@ -1,9 +1,9 @@
393 393 $ echo babar
394 394 - rataxes
395 395 + babar
396 396 This is a noop statement so that
397 397 this test is still more bytes than success.
398 398 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
399 399 - saved backup bundle to $TESTTMP/foo.hg
400 400 + saved backup bundle to $TESTTMP/foo.hg* (glob)
401 401 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
402 402 saved backup bundle to $TESTTMP/foo.hg* (glob)
403 403 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
404 404 Accept this change? [n] ..
405 405 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
406 406
407 407 $ sed -e 's,(glob)$,&<,g' test-failure.t
408 408 $ echo babar
409 409 babar
410 410 This is a noop statement so that
411 411 this test is still more bytes than success.
412 412 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
413 413 saved backup bundle to $TESTTMP/foo.hg (glob)<
414 414 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
415 415 saved backup bundle to $TESTTMP/foo.hg (glob)<
416 416 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
417 417 saved backup bundle to $TESTTMP/*.hg (glob)<
418 418
419 419 (reinstall)
420 420 $ mv backup test-failure.t
421 421
422 422 No Diff
423 423 ===============
424 424
425 425 $ rt --nodiff
426 426 !.
427 427 Failed test-failure.t: output changed
428 428 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
429 429 python hash seed: * (glob)
430 430 [1]
431 431
432 432 test --tmpdir support
433 433 $ rt --tmpdir=$TESTTMP/keep test-success.t
434 434
435 435 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
436 436 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
437 437 .
438 438 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
439 439
440 440 timeouts
441 441 ========
442 442 $ cat > test-timeout.t <<EOF
443 443 > $ sleep 2
444 444 > $ echo pass
445 445 > pass
446 446 > EOF
447 447 > echo '#require slow' > test-slow-timeout.t
448 448 > cat test-timeout.t >> test-slow-timeout.t
449 449 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
450 450 st
451 451 Skipped test-slow-timeout.t: missing feature: allow slow tests
452 452 Failed test-timeout.t: timed out
453 453 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
454 454 python hash seed: * (glob)
455 455 [1]
456 456 $ rt --timeout=1 --slowtimeout=3 \
457 457 > test-timeout.t test-slow-timeout.t --allow-slow-tests
458 458 .t
459 459 Failed test-timeout.t: timed out
460 460 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
461 461 python hash seed: * (glob)
462 462 [1]
463 463 $ rm test-timeout.t test-slow-timeout.t
464 464
465 465 test for --time
466 466 ==================
467 467
468 468 $ rt test-success.t --time
469 469 .
470 470 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
471 471 # Producing time report
472 472 start end cuser csys real Test
473 473 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
474 474
475 475 test for --time with --job enabled
476 476 ====================================
477 477
478 478 $ rt test-success.t --time --jobs 2
479 479 .
480 480 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
481 481 # Producing time report
482 482 start end cuser csys real Test
483 483 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
484 484
485 485 Skips
486 486 ================
487 487 $ cat > test-skip.t <<EOF
488 488 > $ echo xyzzy
489 489 > #require false
490 490 > EOF
491 491 $ rt --nodiff
492 492 !.s
493 493 Skipped test-skip.t: missing feature: nail clipper
494 494 Failed test-failure.t: output changed
495 495 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
496 496 python hash seed: * (glob)
497 497 [1]
498 498
499 499 $ rt --keyword xyzzy
500 500 .s
501 501 Skipped test-skip.t: missing feature: nail clipper
502 502 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
503 503
504 504 Skips with xml
505 505 $ rt --keyword xyzzy \
506 506 > --xunit=xunit.xml
507 507 .s
508 508 Skipped test-skip.t: missing feature: nail clipper
509 509 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
510 510 $ cat xunit.xml
511 511 <?xml version="1.0" encoding="utf-8"?>
512 512 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
513 513 <testcase name="test-success.t" time="*"/> (glob)
514 514 </testsuite>
515 515
516 516 Missing skips or blacklisted skips don't count as executed:
517 517 $ echo test-failure.t > blacklist
518 518 $ rt --blacklist=blacklist --json\
519 519 > test-failure.t test-bogus.t
520 520 ss
521 521 Skipped test-bogus.t: Doesn't exist
522 522 Skipped test-failure.t: blacklisted
523 523 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
524 524 $ cat report.json
525 525 testreport ={
526 526 "test-bogus.t": {
527 527 "result": "skip"
528 528 },
529 529 "test-failure.t": {
530 530 "result": "skip"
531 531 }
532 532 } (no-eol)
533 533 test for --json
534 534 ==================
535 535
536 536 $ rt --json
537 537
538 538 --- $TESTTMP/test-failure.t
539 539 +++ $TESTTMP/test-failure.t.err
540 540 @@ -1,4 +1,4 @@
541 541 $ echo babar
542 542 - rataxes
543 543 + babar
544 544 This is a noop statement so that
545 545 this test is still more bytes than success.
546 546
547 547 ERROR: test-failure.t output changed
548 548 !.s
549 549 Skipped test-skip.t: missing feature: nail clipper
550 550 Failed test-failure.t: output changed
551 551 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
552 552 python hash seed: * (glob)
553 553 [1]
554 554
555 555 $ cat report.json
556 556 testreport ={
557 557 "test-failure.t": [\{] (re)
558 558 "csys": "\s*[\d\.]{4,5}", ? (re)
559 559 "cuser": "\s*[\d\.]{4,5}", ? (re)
560 560 "diff": "---.+\+\+\+.+", ? (re)
561 561 "end": "\s*[\d\.]{4,5}", ? (re)
562 562 "result": "failure", ? (re)
563 563 "start": "\s*[\d\.]{4,5}", ? (re)
564 564 "time": "\s*[\d\.]{4,5}" (re)
565 565 }, ? (re)
566 566 "test-skip.t": {
567 567 "csys": "\s*[\d\.]{4,5}", ? (re)
568 568 "cuser": "\s*[\d\.]{4,5}", ? (re)
569 569 "diff": "", ? (re)
570 570 "end": "\s*[\d\.]{4,5}", ? (re)
571 571 "result": "skip", ? (re)
572 572 "start": "\s*[\d\.]{4,5}", ? (re)
573 573 "time": "\s*[\d\.]{4,5}" (re)
574 574 }, ? (re)
575 575 "test-success.t": [\{] (re)
576 576 "csys": "\s*[\d\.]{4,5}", ? (re)
577 577 "cuser": "\s*[\d\.]{4,5}", ? (re)
578 578 "diff": "", ? (re)
579 579 "end": "\s*[\d\.]{4,5}", ? (re)
580 580 "result": "success", ? (re)
581 581 "start": "\s*[\d\.]{4,5}", ? (re)
582 582 "time": "\s*[\d\.]{4,5}" (re)
583 583 }
584 584 } (no-eol)
585 585
586 586 Test that failed test accepted through interactive are properly reported:
587 587
588 588 $ cp test-failure.t backup
589 589 $ echo y | rt --json -i
590 590
591 591 --- $TESTTMP/test-failure.t
592 592 +++ $TESTTMP/test-failure.t.err
593 593 @@ -1,4 +1,4 @@
594 594 $ echo babar
595 595 - rataxes
596 596 + babar
597 597 This is a noop statement so that
598 598 this test is still more bytes than success.
599 599 Accept this change? [n] ..s
600 600 Skipped test-skip.t: missing feature: nail clipper
601 601 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
602 602
603 603 $ cat report.json
604 604 testreport ={
605 605 "test-failure.t": [\{] (re)
606 606 "csys": "\s*[\d\.]{4,5}", ? (re)
607 607 "cuser": "\s*[\d\.]{4,5}", ? (re)
608 608 "diff": "", ? (re)
609 609 "end": "\s*[\d\.]{4,5}", ? (re)
610 610 "result": "success", ? (re)
611 611 "start": "\s*[\d\.]{4,5}", ? (re)
612 612 "time": "\s*[\d\.]{4,5}" (re)
613 613 }, ? (re)
614 614 "test-skip.t": {
615 615 "csys": "\s*[\d\.]{4,5}", ? (re)
616 616 "cuser": "\s*[\d\.]{4,5}", ? (re)
617 617 "diff": "", ? (re)
618 618 "end": "\s*[\d\.]{4,5}", ? (re)
619 619 "result": "skip", ? (re)
620 620 "start": "\s*[\d\.]{4,5}", ? (re)
621 621 "time": "\s*[\d\.]{4,5}" (re)
622 622 }, ? (re)
623 623 "test-success.t": [\{] (re)
624 624 "csys": "\s*[\d\.]{4,5}", ? (re)
625 625 "cuser": "\s*[\d\.]{4,5}", ? (re)
626 626 "diff": "", ? (re)
627 627 "end": "\s*[\d\.]{4,5}", ? (re)
628 628 "result": "success", ? (re)
629 629 "start": "\s*[\d\.]{4,5}", ? (re)
630 630 "time": "\s*[\d\.]{4,5}" (re)
631 631 }
632 632 } (no-eol)
633 633 $ mv backup test-failure.t
634 634
635 635 backslash on end of line with glob matching is handled properly
636 636
637 637 $ cat > test-glob-backslash.t << EOF
638 638 > $ echo 'foo bar \\'
639 639 > foo * \ (glob)
640 640 > EOF
641 641
642 642 $ rt test-glob-backslash.t
643 643 .
644 644 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
645 645
646 646 $ rm -f test-glob-backslash.t
647 647
648 648 Test reusability for third party tools
649 649 ======================================
650 650
651 651 $ mkdir "$TESTTMP"/anothertests
652 652 $ cd "$TESTTMP"/anothertests
653 653
654 654 test that `run-tests.py` can execute hghave, even if it runs not in
655 655 Mercurial source tree.
656 656
657 657 $ cat > test-hghave.t <<EOF
658 658 > #require true
659 659 > $ echo foo
660 660 > foo
661 661 > EOF
662 662 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
663 663 .
664 664 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
665 665
666 666 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
667 667 running is placed.
668 668
669 669 $ cat > test-runtestdir.t <<EOF
670 670 > - $TESTDIR, in which test-run-tests.t is placed
671 671 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
672 672 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
673 673 >
674 674 > #if windows
675 675 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
676 676 > #else
677 677 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
678 678 > #endif
679 679 > $ test "\$RUNTESTDIR" = "$TESTDIR"
680 680 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
681 681 > #!/usr/bin/env python
682 682 > #
683 683 > # check-code - a style and portability checker for Mercurial
684 684 > EOF
685 685 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
686 686 .
687 687 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
688 688
689 689 #if execbit
690 690
691 691 test that TESTDIR is referred in PATH
692 692
693 693 $ cat > custom-command.sh <<EOF
694 694 > #!/bin/sh
695 695 > echo "hello world"
696 696 > EOF
697 697 $ chmod +x custom-command.sh
698 698 $ cat > test-testdir-path.t <<EOF
699 699 > $ custom-command.sh
700 700 > hello world
701 701 > EOF
702 702 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
703 703 .
704 704 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
705 705
706 706 #endif
707 707
708 708 test support for --allow-slow-tests
709 709 $ cat > test-very-slow-test.t <<EOF
710 710 > #require slow
711 711 > $ echo pass
712 712 > pass
713 713 > EOF
714 714 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
715 715 s
716 716 Skipped test-very-slow-test.t: missing feature: allow slow tests
717 717 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
718 718 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
719 719 .
720 720 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
721
722 support for running a test outside the current directory
723 $ mkdir nonlocal
724 $ cat > nonlocal/test-is-not-here.t << EOF
725 > $ echo pass
726 > pass
727 > EOF
728 $ rt nonlocal/test-is-not-here.t
729 .
730 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now