run-tests: handle json.dumps divergence...
timeless
r29199:daff05dc default
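The commit subject refers to json.dumps output diverging between Python versions, which matters here because run-tests.py compares test output byte for byte. As a hedged illustration only (the truncated subject does not say which case this commit actually handles), one well-known divergence is that Python 2 keeps a trailing space after item separators when indent= is used, while Python 3.4+ does not:

    import json

    # Python 2.7 keeps the ', ' item separator even with indent=, so the
    # '"a": 1,' line ends with a trailing space; Python 3.4+ defaults to
    # ',' when indent is given and emits no trailing whitespace. A test
    # harness that diffs literal output has to normalize such differences.
    print(json.dumps({'a': 1, 'b': 2}, indent=2, sort_keys=True))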
@@ -1,2547 +1,2548 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import print_function
47 47
48 48 from distutils import version
49 49 import difflib
50 50 import errno
51 51 import json
52 52 import optparse
53 53 import os
54 54 import shutil
55 55 import subprocess
56 56 import signal
57 57 import socket
58 58 import sys
59 59 import tempfile
60 60 import time
61 61 import random
62 62 import re
63 63 import threading
64 64 import killdaemons as killmod
65 65 try:
66 66 import Queue as queue
67 67 except ImportError:
68 68 import queue
69 69 from xml.dom import minidom
70 70 import unittest
71 71
72 72 osenvironb = getattr(os, 'environb', os.environ)
73 73 processlock = threading.Lock()
74 74
75 75 if sys.version_info > (3, 5, 0):
76 76 PYTHON3 = True
77 77 xrange = range # we use xrange in one place, and we'd rather not use range
78 78 def _bytespath(p):
79 79 return p.encode('utf-8')
80 80
81 81 def _strpath(p):
82 82 return p.decode('utf-8')
83 83
84 84 elif sys.version_info >= (3, 0, 0):
85 85 print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
86 86 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
87 87 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
88 88 else:
89 89 PYTHON3 = False
90 90
91 91 # In python 2.x, path operations are generally done using
92 92 # bytestrings by default, so we don't have to do any extra
93 93 # fiddling there. We define the wrapper functions anyway just to
94 94 # help keep code consistent between platforms.
95 95 def _bytespath(p):
96 96 return p
97 97
98 98 _strpath = _bytespath
99 99
100 100 # For Windows support
101 101 wifexited = getattr(os, "WIFEXITED", lambda x: False)
102 102
103 103 def checkportisavailable(port):
104 104 """return true if a port seems free to bind on localhost"""
105 105 try:
106 106 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
107 107 s.bind(('localhost', port))
108 108 s.close()
109 109 return True
110 110 except socket.error as exc:
111 111 if not exc.errno == errno.EADDRINUSE:
112 112 raise
113 113 return False
114 114
115 115 closefds = os.name == 'posix'
116 116 def Popen4(cmd, wd, timeout, env=None):
117 117 processlock.acquire()
118 118 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
119 119 close_fds=closefds,
120 120 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
121 121 stderr=subprocess.STDOUT)
122 122 processlock.release()
123 123
124 124 p.fromchild = p.stdout
125 125 p.tochild = p.stdin
126 126 p.childerr = p.stderr
127 127
128 128 p.timeout = False
129 129 if timeout:
130 130 def t():
131 131 start = time.time()
132 132 while time.time() - start < timeout and p.returncode is None:
133 133 time.sleep(.1)
134 134 p.timeout = True
135 135 if p.returncode is None:
136 136 terminate(p)
137 137 threading.Thread(target=t).start()
138 138
139 139 return p
140 140
141 141 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
142 142 IMPL_PATH = b'PYTHONPATH'
143 143 if 'java' in sys.platform:
144 144 IMPL_PATH = b'JYTHONPATH'
145 145
146 146 defaults = {
147 147 'jobs': ('HGTEST_JOBS', 1),
148 148 'timeout': ('HGTEST_TIMEOUT', 180),
149 149 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
150 150 'port': ('HGTEST_PORT', 20059),
151 151 'shell': ('HGTEST_SHELL', 'sh'),
152 152 }
153 153
154 154 def canonpath(path):
155 155 return os.path.realpath(os.path.expanduser(path))
156 156
157 157 def parselistfiles(files, listtype, warn=True):
158 158 entries = dict()
159 159 for filename in files:
160 160 try:
161 161 path = os.path.expanduser(os.path.expandvars(filename))
162 162 f = open(path, "rb")
163 163 except IOError as err:
164 164 if err.errno != errno.ENOENT:
165 165 raise
166 166 if warn:
167 167 print("warning: no such %s file: %s" % (listtype, filename))
168 168 continue
169 169
170 170 for line in f.readlines():
171 171 line = line.split(b'#', 1)[0].strip()
172 172 if line:
173 173 entries[line] = filename
174 174
175 175 f.close()
176 176 return entries
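# Example blacklist/whitelist file (for illustration, hypothetical names):
# one entry per line, '#' starts a comment, blank lines are skipped:
#
#   test-something.t   # flaky on this builder
#   test-other.t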
177 177
178 178 def getparser():
179 179 """Obtain the OptionParser used by the CLI."""
180 180 parser = optparse.OptionParser("%prog [options] [tests]")
181 181
182 182 # keep these sorted
183 183 parser.add_option("--blacklist", action="append",
184 184 help="skip tests listed in the specified blacklist file")
185 185 parser.add_option("--whitelist", action="append",
186 186 help="always run tests listed in the specified whitelist file")
187 187 parser.add_option("--changed", type="string",
188 188 help="run tests that are changed in parent rev or working directory")
189 189 parser.add_option("-C", "--annotate", action="store_true",
190 190 help="output files annotated with coverage")
191 191 parser.add_option("-c", "--cover", action="store_true",
192 192 help="print a test coverage report")
193 193 parser.add_option("-d", "--debug", action="store_true",
194 194 help="debug mode: write output of test scripts to console"
195 195 " rather than capturing and diffing it (disables timeout)")
196 196 parser.add_option("-f", "--first", action="store_true",
197 197 help="exit on the first test failure")
198 198 parser.add_option("-H", "--htmlcov", action="store_true",
199 199 help="create an HTML report of the coverage of the files")
200 200 parser.add_option("-i", "--interactive", action="store_true",
201 201 help="prompt to accept changed output")
202 202 parser.add_option("-j", "--jobs", type="int",
203 203 help="number of jobs to run in parallel"
204 204 " (default: $%s or %d)" % defaults['jobs'])
205 205 parser.add_option("--keep-tmpdir", action="store_true",
206 206 help="keep temporary directory after running tests")
207 207 parser.add_option("-k", "--keywords",
208 208 help="run tests matching keywords")
209 209 parser.add_option("-l", "--local", action="store_true",
210 210 help="shortcut for --with-hg=<testdir>/../hg")
211 211 parser.add_option("--loop", action="store_true",
212 212 help="loop tests repeatedly")
213 213 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
214 214 help="run each test N times (default=1)", default=1)
215 215 parser.add_option("-n", "--nodiff", action="store_true",
216 216 help="skip showing test changes")
217 217 parser.add_option("-p", "--port", type="int",
218 218 help="port on which servers should listen"
219 219 " (default: $%s or %d)" % defaults['port'])
220 220 parser.add_option("--compiler", type="string",
221 221 help="compiler to build with")
222 222 parser.add_option("--pure", action="store_true",
223 223 help="use pure Python code instead of C extensions")
224 224 parser.add_option("-R", "--restart", action="store_true",
225 225 help="restart at last error")
226 226 parser.add_option("-r", "--retest", action="store_true",
227 227 help="retest failed tests")
228 228 parser.add_option("-S", "--noskips", action="store_true",
229 229 help="don't report skip tests verbosely")
230 230 parser.add_option("--shell", type="string",
231 231 help="shell to use (default: $%s or %s)" % defaults['shell'])
232 232 parser.add_option("-t", "--timeout", type="int",
233 233 help="kill errant tests after TIMEOUT seconds"
234 234 " (default: $%s or %d)" % defaults['timeout'])
235 235 parser.add_option("--slowtimeout", type="int",
236 236 help="kill errant slow tests after SLOWTIMEOUT seconds"
237 237 " (default: $%s or %d)" % defaults['slowtimeout'])
238 238 parser.add_option("--time", action="store_true",
239 239 help="time how long each test takes")
240 240 parser.add_option("--json", action="store_true",
241 241 help="store test result data in 'report.json' file")
242 242 parser.add_option("--tmpdir", type="string",
243 243 help="run tests in the given temporary directory"
244 244 " (implies --keep-tmpdir)")
245 245 parser.add_option("-v", "--verbose", action="store_true",
246 246 help="output verbose messages")
247 247 parser.add_option("--xunit", type="string",
248 248 help="record xunit results at specified path")
249 249 parser.add_option("--view", type="string",
250 250 help="external diff viewer")
251 251 parser.add_option("--with-hg", type="string",
252 252 metavar="HG",
253 253 help="test using specified hg script rather than a "
254 254 "temporary installation")
255 255 parser.add_option("--chg", action="store_true",
256 256 help="install and use chg wrapper in place of hg")
257 257 parser.add_option("--with-chg", metavar="CHG",
258 258 help="use specified chg wrapper in place of hg")
259 259 parser.add_option("-3", "--py3k-warnings", action="store_true",
260 260 help="enable Py3k warnings on Python 2.6+")
261 261 # This option should be deleted once test-check-py3-compat.t and other
262 262 # Python 3 tests run with Python 3.
263 263 parser.add_option("--with-python3", metavar="PYTHON3",
264 264 help="Python 3 interpreter (if running under Python 2)"
265 265 " (TEMPORARY)")
266 266 parser.add_option('--extra-config-opt', action="append",
267 267 help='set the given config opt in the test hgrc')
268 268 parser.add_option('--random', action="store_true",
269 269 help='run tests in random order')
270 270 parser.add_option('--profile-runner', action='store_true',
271 271 help='run statprof on run-tests')
272 272 parser.add_option('--allow-slow-tests', action='store_true',
273 273 help='allow extremely slow tests')
274 274 parser.add_option('--showchannels', action='store_true',
275 275 help='show scheduling channels')
276 276 parser.add_option('--known-good-rev', type="string",
277 277 metavar="known_good_rev",
278 278 help=("Automatically bisect any failures using this "
279 279 "revision as a known-good revision."))
280 280
281 281 for option, (envvar, default) in defaults.items():
282 282 defaults[option] = type(default)(os.environ.get(envvar, default))
283 283 parser.set_defaults(**defaults)
284 284
285 285 return parser
286 286
287 287 def parseargs(args, parser):
288 288 """Parse arguments with our OptionParser and validate results."""
289 289 (options, args) = parser.parse_args(args)
290 290
291 291 # jython is always pure
292 292 if 'java' in sys.platform or '__pypy__' in sys.modules:
293 293 options.pure = True
294 294
295 295 if options.with_hg:
296 296 options.with_hg = canonpath(_bytespath(options.with_hg))
297 297 if not (os.path.isfile(options.with_hg) and
298 298 os.access(options.with_hg, os.X_OK)):
299 299 parser.error('--with-hg must specify an executable hg script')
300 300 if not os.path.basename(options.with_hg) == b'hg':
301 301 sys.stderr.write('warning: --with-hg should specify an hg script\n')
302 302 if options.local:
303 303 testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
304 304 hgbin = os.path.join(os.path.dirname(testdir), b'hg')
305 305 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
306 306 parser.error('--local specified, but %r not found or not executable'
307 307 % hgbin)
308 308 options.with_hg = hgbin
309 309
310 310 if (options.chg or options.with_chg) and os.name == 'nt':
311 311 parser.error('chg does not work on %s' % os.name)
312 312 if options.with_chg:
313 313 options.chg = False # no installation to temporary location
314 314 options.with_chg = canonpath(_bytespath(options.with_chg))
315 315 if not (os.path.isfile(options.with_chg) and
316 316 os.access(options.with_chg, os.X_OK)):
317 317 parser.error('--with-chg must specify a chg executable')
318 318 if options.chg and options.with_hg:
319 319 # chg shares installation location with hg
320 320 parser.error('--chg does not work when --with-hg is specified '
321 321 '(use --with-chg instead)')
322 322
323 323 options.anycoverage = options.cover or options.annotate or options.htmlcov
324 324 if options.anycoverage:
325 325 try:
326 326 import coverage
327 327 covver = version.StrictVersion(coverage.__version__).version
328 328 if covver < (3, 3):
329 329 parser.error('coverage options require coverage 3.3 or later')
330 330 except ImportError:
331 331 parser.error('coverage options now require the coverage package')
332 332
333 333 if options.anycoverage and options.local:
334 334 # this needs some path mangling somewhere, I guess
335 335 parser.error("sorry, coverage options do not work when --local "
336 336 "is specified")
337 337
338 338 if options.anycoverage and options.with_hg:
339 339 parser.error("sorry, coverage options do not work when --with-hg "
340 340 "is specified")
341 341
342 342 global verbose
343 343 if options.verbose:
344 344 verbose = ''
345 345
346 346 if options.tmpdir:
347 347 options.tmpdir = canonpath(options.tmpdir)
348 348
349 349 if options.jobs < 1:
350 350 parser.error('--jobs must be positive')
351 351 if options.interactive and options.debug:
352 352 parser.error("-i/--interactive and -d/--debug are incompatible")
353 353 if options.debug:
354 354 if options.timeout != defaults['timeout']:
355 355 sys.stderr.write(
356 356 'warning: --timeout option ignored with --debug\n')
357 357 if options.slowtimeout != defaults['slowtimeout']:
358 358 sys.stderr.write(
359 359 'warning: --slowtimeout option ignored with --debug\n')
360 360 options.timeout = 0
361 361 options.slowtimeout = 0
362 362 if options.py3k_warnings:
363 363 if PYTHON3:
364 364 parser.error(
365 365 '--py3k-warnings can only be used on Python 2.6 and 2.7')
366 366 if options.with_python3:
367 367 if PYTHON3:
368 368 parser.error('--with-python3 cannot be used when executing with '
369 369 'Python 3')
370 370
371 371 options.with_python3 = canonpath(options.with_python3)
372 372 # Verify Python3 executable is acceptable.
373 373 proc = subprocess.Popen([options.with_python3, b'--version'],
374 374 stdout=subprocess.PIPE,
375 375 stderr=subprocess.STDOUT)
376 376 out, _err = proc.communicate()
377 377 ret = proc.wait()
378 378 if ret != 0:
379 379 parser.error('could not determine version of python 3')
380 380 if not out.startswith('Python '):
381 381 parser.error('unexpected output from python3 --version: %s' %
382 382 out)
383 383 vers = version.LooseVersion(out[len('Python '):])
384 384 if vers < version.LooseVersion('3.5.0'):
385 385 parser.error('--with-python3 version must be 3.5.0 or greater; '
386 386 'got %s' % out)
387 387
388 388 if options.blacklist:
389 389 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
390 390 if options.whitelist:
391 391 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
392 392 else:
393 393 options.whitelisted = {}
394 394
395 395 if options.showchannels:
396 396 options.nodiff = True
397 397
398 398 return (options, args)
399 399
400 400 def rename(src, dst):
401 401 """Like os.rename(), trade atomicity and opened files friendliness
402 402 for existing destination support.
403 403 """
404 404 shutil.copy(src, dst)
405 405 os.remove(src)
406 406
407 407 _unified_diff = difflib.unified_diff
408 408 if PYTHON3:
409 409 import functools
410 410 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
411 411
412 412 def getdiff(expected, output, ref, err):
413 413 servefail = False
414 414 lines = []
415 415 for line in _unified_diff(expected, output, ref, err):
416 416 if line.startswith(b'+++') or line.startswith(b'---'):
417 417 line = line.replace(b'\\', b'/')
418 418 if line.endswith(b' \n'):
419 419 line = line[:-2] + b'\n'
420 420 lines.append(line)
421 421 if not servefail and line.startswith(
422 422 b'+ abort: child process failed to start'):
423 423 servefail = True
424 424
425 425 return servefail, lines
426 426
427 427 verbose = False
428 428 def vlog(*msg):
429 429 """Log only when in verbose mode."""
430 430 if verbose is False:
431 431 return
432 432
433 433 return log(*msg)
434 434
435 435 # Bytes that break XML even in a CDATA block: control characters 0-31
436 436 # sans \t, \n and \r
437 437 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
438 438
439 439 def cdatasafe(data):
440 440 """Make a string safe to include in a CDATA block.
441 441
442 442 Certain control characters are illegal in a CDATA block, and
443 443 there's no way to include a ]]> in a CDATA either. This function
444 444 replaces illegal bytes with ? and adds a space between the ]] so
445 445 that it won't break the CDATA block.
446 446 """
447 447 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
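# Example (for illustration): cdatasafe(b'ok\x07]]>done') returns
# b'ok?] ]>done' -- the control byte is replaced by '?' and ']]>' is split
# so it cannot terminate the surrounding CDATA block early.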
448 448
449 449 def log(*msg):
450 450 """Log something to stdout.
451 451
452 452 Arguments are strings to print.
453 453 """
454 454 with iolock:
455 455 if verbose:
456 456 print(verbose, end=' ')
457 457 for m in msg:
458 458 print(m, end=' ')
459 459 print()
460 460 sys.stdout.flush()
461 461
462 462 def terminate(proc):
463 463 """Terminate subprocess (with fallback for Python versions < 2.6)"""
464 464 vlog('# Terminating process %d' % proc.pid)
465 465 try:
466 466 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
467 467 except OSError:
468 468 pass
469 469
470 470 def killdaemons(pidfile):
471 471 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
472 472 logfn=vlog)
473 473
474 474 class Test(unittest.TestCase):
475 475 """Encapsulates a single, runnable test.
476 476
477 477 While this class conforms to the unittest.TestCase API, it differs in that
478 478 instances need to be instantiated manually. (Typically, unittest.TestCase
479 479 classes are instantiated automatically by scanning modules.)
480 480 """
481 481
482 482 # Status code reserved for skipped tests (used by hghave).
483 483 SKIPPED_STATUS = 80
484 484
485 485 def __init__(self, path, tmpdir, keeptmpdir=False,
486 486 debug=False,
487 487 timeout=defaults['timeout'],
488 488 startport=defaults['port'], extraconfigopts=None,
489 489 py3kwarnings=False, shell=None, hgcommand=None,
490 490 slowtimeout=defaults['slowtimeout'], usechg=False):
491 491 """Create a test from parameters.
492 492
493 493 path is the full path to the file defining the test.
494 494
495 495 tmpdir is the main temporary directory to use for this test.
496 496
497 497 keeptmpdir determines whether to keep the test's temporary directory
498 498 after execution. It defaults to removal (False).
499 499
500 500 debug mode will make the test execute verbosely, with unfiltered
501 501 output.
502 502
503 503 timeout controls the maximum run time of the test. It is ignored when
504 504 debug is True. See slowtimeout for tests with #require slow.
505 505
506 506 slowtimeout overrides timeout if the test has #require slow.
507 507
508 508 startport controls the starting port number to use for this test. Each
509 509 test will reserve 3 port numbers for execution. It is the caller's
510 510 responsibility to allocate a non-overlapping port range to Test
511 511 instances.
512 512
513 513 extraconfigopts is an iterable of extra hgrc config options. Values
514 514 must have the form "key=value" (something understood by hgrc). Values
515 515 of the form "foo.key=value" will result in "[foo] key=value".
516 516
517 517 py3kwarnings enables Py3k warnings.
518 518
519 519 shell is the shell to execute tests in.
520 520 """
521 521 self.path = path
522 522 self.bname = os.path.basename(path)
523 523 self.name = _strpath(self.bname)
524 524 self._testdir = os.path.dirname(path)
525 525 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
526 526
527 527 self._threadtmp = tmpdir
528 528 self._keeptmpdir = keeptmpdir
529 529 self._debug = debug
530 530 self._timeout = timeout
531 531 self._slowtimeout = slowtimeout
532 532 self._startport = startport
533 533 self._extraconfigopts = extraconfigopts or []
534 534 self._py3kwarnings = py3kwarnings
535 535 self._shell = _bytespath(shell)
536 536 self._hgcommand = hgcommand or b'hg'
537 537 self._usechg = usechg
538 538
539 539 self._aborted = False
540 540 self._daemonpids = []
541 541 self._finished = None
542 542 self._ret = None
543 543 self._out = None
544 544 self._skipped = None
545 545 self._testtmp = None
546 546 self._chgsockdir = None
547 547
548 548 # If we're not in --debug mode and reference output file exists,
549 549 # check test output against it.
550 550 if debug:
551 551 self._refout = None # to match "out is None"
552 552 elif os.path.exists(self.refpath):
553 553 f = open(self.refpath, 'rb')
554 554 self._refout = f.read().splitlines(True)
555 555 f.close()
556 556 else:
557 557 self._refout = []
558 558
559 559 # needed to get base class __repr__ running
560 560 @property
561 561 def _testMethodName(self):
562 562 return self.name
563 563
564 564 def __str__(self):
565 565 return self.name
566 566
567 567 def shortDescription(self):
568 568 return self.name
569 569
570 570 def setUp(self):
571 571 """Tasks to perform before run()."""
572 572 self._finished = False
573 573 self._ret = None
574 574 self._out = None
575 575 self._skipped = None
576 576
577 577 try:
578 578 os.mkdir(self._threadtmp)
579 579 except OSError as e:
580 580 if e.errno != errno.EEXIST:
581 581 raise
582 582
583 583 name = os.path.basename(self.path)
584 584 self._testtmp = os.path.join(self._threadtmp, name)
585 585 os.mkdir(self._testtmp)
586 586
587 587 # Remove any previous output files.
588 588 if os.path.exists(self.errpath):
589 589 try:
590 590 os.remove(self.errpath)
591 591 except OSError as e:
592 592 # We might have raced another test to clean up a .err
593 593 # file, so ignore ENOENT when removing a previous .err
594 594 # file.
595 595 if e.errno != errno.ENOENT:
596 596 raise
597 597
598 598 if self._usechg:
599 599 self._chgsockdir = os.path.join(self._threadtmp,
600 600 b'%s.chgsock' % name)
601 601 os.mkdir(self._chgsockdir)
602 602
603 603 def run(self, result):
604 604 """Run this test and report results against a TestResult instance."""
605 605 # This function is extremely similar to unittest.TestCase.run(). Once
606 606 # we require Python 2.7 (or at least its version of unittest), this
607 607 # function can largely go away.
608 608 self._result = result
609 609 result.startTest(self)
610 610 try:
611 611 try:
612 612 self.setUp()
613 613 except (KeyboardInterrupt, SystemExit):
614 614 self._aborted = True
615 615 raise
616 616 except Exception:
617 617 result.addError(self, sys.exc_info())
618 618 return
619 619
620 620 success = False
621 621 try:
622 622 self.runTest()
623 623 except KeyboardInterrupt:
624 624 self._aborted = True
625 625 raise
626 626 except SkipTest as e:
627 627 result.addSkip(self, str(e))
628 628 # The base class will have already counted this as a
629 629 # test we "ran", but we want to exclude skipped tests
630 630 # from those we count towards those run.
631 631 result.testsRun -= 1
632 632 except IgnoreTest as e:
633 633 result.addIgnore(self, str(e))
634 634 # As with skips, ignores also should be excluded from
635 635 # the number of tests executed.
636 636 result.testsRun -= 1
637 637 except WarnTest as e:
638 638 result.addWarn(self, str(e))
639 639 except ReportedTest:
640 640 pass
641 641 except self.failureException as e:
642 642 # This differs from unittest in that we don't capture
643 643 # the stack trace. This is for historical reasons and
644 644 # this decision could be revisited in the future,
645 645 # especially for PythonTest instances.
646 646 if result.addFailure(self, str(e)):
647 647 success = True
648 648 except Exception:
649 649 result.addError(self, sys.exc_info())
650 650 else:
651 651 success = True
652 652
653 653 try:
654 654 self.tearDown()
655 655 except (KeyboardInterrupt, SystemExit):
656 656 self._aborted = True
657 657 raise
658 658 except Exception:
659 659 result.addError(self, sys.exc_info())
660 660 success = False
661 661
662 662 if success:
663 663 result.addSuccess(self)
664 664 finally:
665 665 result.stopTest(self, interrupted=self._aborted)
666 666
667 667 def runTest(self):
668 668 """Run this test instance.
669 669
670 670 This will return a tuple describing the result of the test.
671 671 """
672 672 env = self._getenv()
673 673 self._daemonpids.append(env['DAEMON_PIDS'])
674 674 self._createhgrc(env['HGRCPATH'])
675 675
676 676 vlog('# Test', self.name)
677 677
678 678 ret, out = self._run(env)
679 679 self._finished = True
680 680 self._ret = ret
681 681 self._out = out
682 682
683 683 def describe(ret):
684 684 if ret < 0:
685 685 return 'killed by signal: %d' % -ret
686 686 return 'returned error code %d' % ret
687 687
688 688 self._skipped = False
689 689
690 690 if ret == self.SKIPPED_STATUS:
691 691 if out is None: # Debug mode, nothing to parse.
692 692 missing = ['unknown']
693 693 failed = None
694 694 else:
695 695 missing, failed = TTest.parsehghaveoutput(out)
696 696
697 697 if not missing:
698 698 missing = ['skipped']
699 699
700 700 if failed:
701 701 self.fail('hghave failed checking for %s' % failed[-1])
702 702 else:
703 703 self._skipped = True
704 704 raise SkipTest(missing[-1])
705 705 elif ret == 'timeout':
706 706 self.fail('timed out')
707 707 elif ret is False:
708 708 raise WarnTest('no result code from test')
709 709 elif out != self._refout:
710 710 # Diff generation may rely on written .err file.
711 711 if (ret != 0 or out != self._refout) and not self._skipped \
712 712 and not self._debug:
713 713 f = open(self.errpath, 'wb')
714 714 for line in out:
715 715 f.write(line)
716 716 f.close()
717 717
718 718 # The result object handles diff calculation for us.
719 719 if self._result.addOutputMismatch(self, ret, out, self._refout):
720 720 # change was accepted, skip failing
721 721 return
722 722
723 723 if ret:
724 724 msg = 'output changed and ' + describe(ret)
725 725 else:
726 726 msg = 'output changed'
727 727
728 728 self.fail(msg)
729 729 elif ret:
730 730 self.fail(describe(ret))
731 731
732 732 def tearDown(self):
733 733 """Tasks to perform after run()."""
734 734 for entry in self._daemonpids:
735 735 killdaemons(entry)
736 736 self._daemonpids = []
737 737
738 738 if self._keeptmpdir:
739 739 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
740 740 (self._testtmp.decode('utf-8'),
741 741 self._threadtmp.decode('utf-8')))
742 742 else:
743 743 shutil.rmtree(self._testtmp, True)
744 744 shutil.rmtree(self._threadtmp, True)
745 745
746 746 if self._usechg:
747 747 # chg servers stop automatically once they notice their socket
748 748 # files have been deleted
749 749 shutil.rmtree(self._chgsockdir, True)
750 750
751 751 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
752 752 and not self._debug and self._out:
753 753 f = open(self.errpath, 'wb')
754 754 for line in self._out:
755 755 f.write(line)
756 756 f.close()
757 757
758 758 vlog("# Ret was:", self._ret, '(%s)' % self.name)
759 759
760 760 def _run(self, env):
761 761 # This should be implemented in child classes to run tests.
762 762 raise SkipTest('unknown test type')
763 763
764 764 def abort(self):
765 765 """Terminate execution of this test."""
766 766 self._aborted = True
767 767
768 768 def _portmap(self, i):
769 769 offset = b'' if i == 0 else b'%d' % i
770 770 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
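# Example (for illustration): with the default start port of 20059,
# _portmap(1) returns (br':20060\b', b':$HGPORT1'), so literal port numbers
# in test output get rewritten to the $HGPORT1 placeholder.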
771 771
772 772 def _getreplacements(self):
773 773 """Obtain a mapping of text replacements to apply to test output.
774 774
775 775 Test output needs to be normalized so it can be compared to expected
776 776 output. This function defines how some of that normalization will
777 777 occur.
778 778 """
779 779 r = [
780 780 # This list should be parallel to defineport in _getenv
781 781 self._portmap(0),
782 782 self._portmap(1),
783 783 self._portmap(2),
784 784 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
785 785 br'\1 (glob)'),
786 786 ]
787 787 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
788 788
789 789 return r
790 790
791 791 def _escapepath(self, p):
792 792 if os.name == 'nt':
793 793 return (
794 794 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
795 795 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
796 796 for c in p))
797 797 )
798 798 else:
799 799 return re.escape(p)
800 800
801 801 def _getenv(self):
802 802 """Obtain environment variables to use during test execution."""
803 803 def defineport(i):
804 804 offset = '' if i == 0 else '%s' % i
805 805 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
806 806 env = os.environ.copy()
807 807 env['TESTTMP'] = self._testtmp
808 808 env['HOME'] = self._testtmp
809 809 # This number should match portneeded in _getport
810 810 for port in xrange(3):
811 811 # This list should be parallel to _portmap in _getreplacements
812 812 defineport(port)
813 813 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
814 814 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
815 815 env["HGEDITOR"] = ('"' + sys.executable + '"'
816 816 + ' -c "import sys; sys.exit(0)"')
817 817 env["HGMERGE"] = "internal:merge"
818 818 env["HGUSER"] = "test"
819 819 env["HGENCODING"] = "ascii"
820 820 env["HGENCODINGMODE"] = "strict"
821 821
822 822 # Reset some environment variables to well-known values so that
823 823 # the tests produce repeatable output.
824 824 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
825 825 env['TZ'] = 'GMT'
826 826 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
827 827 env['COLUMNS'] = '80'
828 828 env['TERM'] = 'xterm'
829 829
830 830 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
831 831 'NO_PROXY').split():
832 832 if k in env:
833 833 del env[k]
834 834
835 835 # unset env related to hooks
836 836 for k in env.keys():
837 837 if k.startswith('HG_'):
838 838 del env[k]
839 839
840 840 if self._usechg:
841 841 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
842 842
843 843 return env
844 844
845 845 def _createhgrc(self, path):
846 846 """Create an hgrc file for this test."""
847 847 hgrc = open(path, 'wb')
848 848 hgrc.write(b'[ui]\n')
849 849 hgrc.write(b'slash = True\n')
850 850 hgrc.write(b'interactive = False\n')
851 851 hgrc.write(b'mergemarkers = detailed\n')
852 852 hgrc.write(b'promptecho = True\n')
853 853 hgrc.write(b'[defaults]\n')
854 854 hgrc.write(b'backout = -d "0 0"\n')
855 855 hgrc.write(b'commit = -d "0 0"\n')
856 856 hgrc.write(b'shelve = --date "0 0"\n')
857 857 hgrc.write(b'tag = -d "0 0"\n')
858 858 hgrc.write(b'[devel]\n')
859 859 hgrc.write(b'all-warnings = true\n')
860 860 hgrc.write(b'[largefiles]\n')
861 861 hgrc.write(b'usercache = %s\n' %
862 862 (os.path.join(self._testtmp, b'.cache/largefiles')))
863 863
864 864 for opt in self._extraconfigopts:
865 865 section, key = opt.split('.', 1)
866 866 assert '=' in key, ('extra config opt %s must '
867 867 'have an = for assignment' % opt)
868 868 hgrc.write(b'[%s]\n%s\n' % (section, key))
869 869 hgrc.close()
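# Example (for illustration): --extra-config-opt ui.verbose=True makes the
# loop above write a '[ui]' section line followed by 'verbose=True' into
# the generated hgrc.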
870 870
871 871 def fail(self, msg):
872 872 # unittest differentiates between errored and failed.
873 873 # Failed is denoted by AssertionError (by default at least).
874 874 raise AssertionError(msg)
875 875
876 876 def _runcommand(self, cmd, env, normalizenewlines=False):
877 877 """Run command in a sub-process, capturing the output (stdout and
878 878 stderr).
879 879
880 880 Return a tuple (exitcode, output). output is None in debug mode.
881 881 """
882 882 if self._debug:
883 883 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
884 884 env=env)
885 885 ret = proc.wait()
886 886 return (ret, None)
887 887
888 888 proc = Popen4(cmd, self._testtmp, self._timeout, env)
889 889 def cleanup():
890 890 terminate(proc)
891 891 ret = proc.wait()
892 892 if ret == 0:
893 893 ret = signal.SIGTERM << 8
894 894 killdaemons(env['DAEMON_PIDS'])
895 895 return ret
896 896
897 897 output = ''
898 898 proc.tochild.close()
899 899
900 900 try:
901 901 output = proc.fromchild.read()
902 902 except KeyboardInterrupt:
903 903 vlog('# Handling keyboard interrupt')
904 904 cleanup()
905 905 raise
906 906
907 907 ret = proc.wait()
908 908 if wifexited(ret):
909 909 ret = os.WEXITSTATUS(ret)
910 910
911 911 if proc.timeout:
912 912 ret = 'timeout'
913 913
914 914 if ret:
915 915 killdaemons(env['DAEMON_PIDS'])
916 916
917 917 for s, r in self._getreplacements():
918 918 output = re.sub(s, r, output)
919 919
920 920 if normalizenewlines:
921 921 output = output.replace('\r\n', '\n')
922 922
923 923 return ret, output.splitlines(True)
924 924
925 925 class PythonTest(Test):
926 926 """A Python-based test."""
927 927
928 928 @property
929 929 def refpath(self):
930 930 return os.path.join(self._testdir, b'%s.out' % self.bname)
931 931
932 932 def _run(self, env):
933 933 py3kswitch = self._py3kwarnings and b' -3' or b''
934 934 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
935 935 vlog("# Running", cmd)
936 936 normalizenewlines = os.name == 'nt'
937 937 result = self._runcommand(cmd, env,
938 938 normalizenewlines=normalizenewlines)
939 939 if self._aborted:
940 940 raise KeyboardInterrupt()
941 941
942 942 return result
943 943
944 944 # This script may want to drop globs from lines matching these patterns on
945 945 # Windows, but check-code.py wants a glob on these lines unconditionally. Don't
946 946 # warn if that is the case for anything matching these lines.
947 947 checkcodeglobpats = [
948 948 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
949 949 re.compile(br'^moving \S+/.*[^)]$'),
950 950 re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
951 951 ]
952 952
953 953 bchr = chr
954 954 if PYTHON3:
955 955 bchr = lambda x: bytes([x])
956 956
957 957 class TTest(Test):
958 958 """A "t test" is a test backed by a .t file."""
959 959
960 960 SKIPPED_PREFIX = b'skipped: '
961 961 FAILED_PREFIX = b'hghave check failed: '
962 962 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
963 963
964 964 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
965 965 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
966 966 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
967 967
968 968 @property
969 969 def refpath(self):
970 970 return os.path.join(self._testdir, self.bname)
971 971
972 972 def _run(self, env):
973 973 f = open(self.path, 'rb')
974 974 lines = f.readlines()
975 975 f.close()
976 976
977 977 salt, script, after, expected = self._parsetest(lines)
978 978
979 979 # Write out the generated script.
980 980 fname = b'%s.sh' % self._testtmp
981 981 f = open(fname, 'wb')
982 982 for l in script:
983 983 f.write(l)
984 984 f.close()
985 985
986 986 cmd = b'%s "%s"' % (self._shell, fname)
987 987 vlog("# Running", cmd)
988 988
989 989 exitcode, output = self._runcommand(cmd, env)
990 990
991 991 if self._aborted:
992 992 raise KeyboardInterrupt()
993 993
994 994 # Do not merge output if skipped. Return hghave message instead.
995 995 # Similarly, with --debug, output is None.
996 996 if exitcode == self.SKIPPED_STATUS or output is None:
997 997 return exitcode, output
998 998
999 999 return self._processoutput(exitcode, output, salt, after, expected)
1000 1000
1001 1001 def _hghave(self, reqs):
1002 1002 # TODO do something smarter when all other uses of hghave are gone.
1003 1003 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
1004 1004 tdir = runtestdir.replace(b'\\', b'/')
1005 1005 proc = Popen4(b'%s -c "%s/hghave %s"' %
1006 1006 (self._shell, tdir, b' '.join(reqs)),
1007 1007 self._testtmp, 0, self._getenv())
1008 1008 stdout, stderr = proc.communicate()
1009 1009 ret = proc.wait()
1010 1010 if wifexited(ret):
1011 1011 ret = os.WEXITSTATUS(ret)
1012 1012 if ret == 2:
1013 1013 print(stdout.decode('utf-8'))
1014 1014 sys.exit(1)
1015 1015
1016 1016 if ret != 0:
1017 1017 return False, stdout
1018 1018
1019 1019 if 'slow' in reqs:
1020 1020 self._timeout = self._slowtimeout
1021 1021 return True, None
1022 1022
1023 1023 def _parsetest(self, lines):
1024 1024 # We generate a shell script which outputs unique markers to line
1025 1025 # up script results with our source. These markers include input
1026 1026 # line number and the last return code.
1027 1027 salt = b"SALT%d" % time.time()
1028 1028 def addsalt(line, inpython):
1029 1029 if inpython:
1030 1030 script.append(b'%s %d 0\n' % (salt, line))
1031 1031 else:
1032 1032 script.append(b'echo %s %d $?\n' % (salt, line))
1033 1033
1034 1034 script = []
1035 1035
1036 1036 # After we run the shell script, we re-unify the script output
1037 1037 # with non-active parts of the source, with synchronization by our
1038 1038 # SALT line number markers. The after table contains the non-active
1039 1039 # components, ordered by line number.
1040 1040 after = {}
1041 1041
1042 1042 # Expected shell script output.
1043 1043 expected = {}
1044 1044
1045 1045 pos = prepos = -1
1046 1046
1047 1047 # True or False when in a true or false conditional section
1048 1048 skipping = None
1049 1049
1050 1050 # We keep track of whether or not we're in a Python block so we
1051 1051 # can generate the surrounding doctest magic.
1052 1052 inpython = False
1053 1053
1054 1054 if self._debug:
1055 1055 script.append(b'set -x\n')
1056 1056 if self._hgcommand != b'hg':
1057 1057 script.append(b'alias hg="%s"\n' % self._hgcommand)
1058 1058 if os.getenv('MSYSTEM'):
1059 1059 script.append(b'alias pwd="pwd -W"\n')
1060 1060
1061 1061 n = 0
1062 1062 for n, l in enumerate(lines):
1063 1063 if not l.endswith(b'\n'):
1064 1064 l += b'\n'
1065 1065 if l.startswith(b'#require'):
1066 1066 lsplit = l.split()
1067 1067 if len(lsplit) < 2 or lsplit[0] != b'#require':
1068 1068 after.setdefault(pos, []).append(b' !!! invalid #require\n')
1069 1069 haveresult, message = self._hghave(lsplit[1:])
1070 1070 if not haveresult:
1071 1071 script = [b'echo "%s"\nexit 80\n' % message]
1072 1072 break
1073 1073 after.setdefault(pos, []).append(l)
1074 1074 elif l.startswith(b'#if'):
1075 1075 lsplit = l.split()
1076 1076 if len(lsplit) < 2 or lsplit[0] != b'#if':
1077 1077 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1078 1078 if skipping is not None:
1079 1079 after.setdefault(pos, []).append(b' !!! nested #if\n')
1080 1080 skipping = not self._hghave(lsplit[1:])[0]
1081 1081 after.setdefault(pos, []).append(l)
1082 1082 elif l.startswith(b'#else'):
1083 1083 if skipping is None:
1084 1084 after.setdefault(pos, []).append(b' !!! missing #if\n')
1085 1085 skipping = not skipping
1086 1086 after.setdefault(pos, []).append(l)
1087 1087 elif l.startswith(b'#endif'):
1088 1088 if skipping is None:
1089 1089 after.setdefault(pos, []).append(b' !!! missing #if\n')
1090 1090 skipping = None
1091 1091 after.setdefault(pos, []).append(l)
1092 1092 elif skipping:
1093 1093 after.setdefault(pos, []).append(l)
1094 1094 elif l.startswith(b' >>> '): # python inlines
1095 1095 after.setdefault(pos, []).append(l)
1096 1096 prepos = pos
1097 1097 pos = n
1098 1098 if not inpython:
1099 1099 # We've just entered a Python block. Add the header.
1100 1100 inpython = True
1101 1101 addsalt(prepos, False) # Make sure we report the exit code.
1102 1102 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1103 1103 addsalt(n, True)
1104 1104 script.append(l[2:])
1105 1105 elif l.startswith(b' ... '): # python inlines
1106 1106 after.setdefault(prepos, []).append(l)
1107 1107 script.append(l[2:])
1108 1108 elif l.startswith(b' $ '): # commands
1109 1109 if inpython:
1110 1110 script.append(b'EOF\n')
1111 1111 inpython = False
1112 1112 after.setdefault(pos, []).append(l)
1113 1113 prepos = pos
1114 1114 pos = n
1115 1115 addsalt(n, False)
1116 1116 cmd = l[4:].split()
1117 1117 if len(cmd) == 2 and cmd[0] == b'cd':
1118 1118 l = b' $ cd %s || exit 1\n' % cmd[1]
1119 1119 script.append(l[4:])
1120 1120 elif l.startswith(b' > '): # continuations
1121 1121 after.setdefault(prepos, []).append(l)
1122 1122 script.append(l[4:])
1123 1123 elif l.startswith(b' '): # results
1124 1124 # Queue up a list of expected results.
1125 1125 expected.setdefault(pos, []).append(l[2:])
1126 1126 else:
1127 1127 if inpython:
1128 1128 script.append(b'EOF\n')
1129 1129 inpython = False
1130 1130 # Non-command/result. Queue up for merged output.
1131 1131 after.setdefault(pos, []).append(l)
1132 1132
1133 1133 if inpython:
1134 1134 script.append(b'EOF\n')
1135 1135 if skipping is not None:
1136 1136 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1137 1137 addsalt(n + 1, False)
1138 1138
1139 1139 return salt, script, after, expected
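# Example of the generated markers (for illustration): if salt were
# b'SALT1466000000', a shell command taken from source line 12 would be
# followed by 'echo SALT1466000000 12 $?' in the generated script;
# _processoutput() later splits captured output on the salt to re-align it
# with line 12 and to recover the command's exit status.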
1140 1140
1141 1141 def _processoutput(self, exitcode, output, salt, after, expected):
1142 1142 # Merge the script output back into a unified test.
1143 1143 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1144 1144 if exitcode != 0:
1145 1145 warnonly = 3
1146 1146
1147 1147 pos = -1
1148 1148 postout = []
1149 1149 for l in output:
1150 1150 lout, lcmd = l, None
1151 1151 if salt in l:
1152 1152 lout, lcmd = l.split(salt, 1)
1153 1153
1154 1154 while lout:
1155 1155 if not lout.endswith(b'\n'):
1156 1156 lout += b' (no-eol)\n'
1157 1157
1158 1158 # Find the expected output at the current position.
1159 1159 els = [None]
1160 1160 if expected.get(pos, None):
1161 1161 els = expected[pos]
1162 1162
1163 1163 i = 0
1164 1164 optional = []
1165 1165 while i < len(els):
1166 1166 el = els[i]
1167 1167
1168 1168 r = TTest.linematch(el, lout)
1169 1169 if isinstance(r, str):
1170 1170 if r == '+glob':
1171 1171 lout = el[:-1] + ' (glob)\n'
1172 1172 r = '' # Warn only this line.
1173 1173 elif r == '-glob':
1174 1174 lout = ''.join(el.rsplit(' (glob)', 1))
1175 1175 r = '' # Warn only this line.
1176 1176 elif r == "retry":
1177 1177 postout.append(b' ' + el)
1178 1178 els.pop(i)
1179 1179 break
1180 1180 else:
1181 1181 log('\ninfo, unknown linematch result: %r\n' % r)
1182 1182 r = False
1183 1183 if r:
1184 1184 els.pop(i)
1185 1185 break
1186 1186 if el and el.endswith(b" (?)\n"):
1187 1187 optional.append(i)
1188 1188 i += 1
1189 1189
1190 1190 if r:
1191 1191 if r == "retry":
1192 1192 continue
1193 1193 # clean up any optional leftovers
1194 1194 for i in optional:
1195 1195 postout.append(b' ' + els[i])
1196 1196 for i in reversed(optional):
1197 1197 del els[i]
1198 1198 postout.append(b' ' + el)
1199 1199 else:
1200 1200 if self.NEEDESCAPE(lout):
1201 1201 lout = TTest._stringescape(b'%s (esc)\n' %
1202 1202 lout.rstrip(b'\n'))
1203 1203 postout.append(b' ' + lout) # Let diff deal with it.
1204 1204 if r != '': # If line failed.
1205 1205 warnonly = 3 # for sure not
1206 1206 elif warnonly == 1: # Is "not yet" and line is warn only.
1207 1207 warnonly = 2 # Yes do warn.
1208 1208 break
1209 1209 else:
1210 1210 # clean up any optional leftovers
1211 1211 while expected.get(pos, None):
1212 1212 el = expected[pos].pop(0)
1213 1213 if el and not el.endswith(b" (?)\n"):
1214 1214 break
1215 1215 postout.append(b' ' + el)
1216 1216
1217 1217 if lcmd:
1218 1218 # Add on last return code.
1219 1219 ret = int(lcmd.split()[1])
1220 1220 if ret != 0:
1221 1221 postout.append(b' [%d]\n' % ret)
1222 1222 if pos in after:
1223 1223 # Merge in non-active test bits.
1224 1224 postout += after.pop(pos)
1225 1225 pos = int(lcmd.split()[0])
1226 1226
1227 1227 if pos in after:
1228 1228 postout += after.pop(pos)
1229 1229
1230 1230 if warnonly == 2:
1231 1231 exitcode = False # Set exitcode to warned.
1232 1232
1233 1233 return exitcode, postout
1234 1234
1235 1235 @staticmethod
1236 1236 def rematch(el, l):
1237 1237 try:
1238 1238 # use \Z to ensure that the regex matches to the end of the string
1239 1239 if os.name == 'nt':
1240 1240 return re.match(el + br'\r?\n\Z', l)
1241 1241 return re.match(el + br'\n\Z', l)
1242 1242 except re.error:
1243 1243 # el is an invalid regex
1244 1244 return False
1245 1245
1246 1246 @staticmethod
1247 1247 def globmatch(el, l):
1248 1248 # The only supported special characters are * and ? plus / which also
1249 1249 # matches \ on windows. Escaping of these characters is supported.
1250 1250 if el + b'\n' == l:
1251 1251 if os.altsep:
1252 1252 # matching on "/" is not needed for this line
1253 1253 for pat in checkcodeglobpats:
1254 1254 if pat.match(el):
1255 1255 return True
1256 1256 return b'-glob'
1257 1257 return True
1258 1258 i, n = 0, len(el)
1259 1259 res = b''
1260 1260 while i < n:
1261 1261 c = el[i:i + 1]
1262 1262 i += 1
1263 1263 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1264 1264 res += el[i - 1:i + 1]
1265 1265 i += 1
1266 1266 elif c == b'*':
1267 1267 res += b'.*'
1268 1268 elif c == b'?':
1269 1269 res += b'.'
1270 1270 elif c == b'/' and os.altsep:
1271 1271 res += b'[/\\\\]'
1272 1272 else:
1273 1273 res += re.escape(c)
1274 1274 return TTest.rematch(res, l)
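# Example (for illustration): for an expected line marked ' (glob)', the
# translation above turns '*' into '.*' and '?' into '.', and when
# os.altsep is set (Windows) a '/' matches either '/' or '\', so
# b'saved backup bundle to $TESTTMP/*.hg (glob)' matches output with
# either path separator.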
1275 1275
1276 1276 @staticmethod
1277 1277 def linematch(el, l):
1278 1278 retry = False
1279 1279 if el == l: # perfect match (fast)
1280 1280 return True
1281 1281 if el:
1282 1282 if el.endswith(b" (?)\n"):
1283 1283 retry = "retry"
1284 1284 el = el[:-5] + b"\n"
1285 1285 if el.endswith(b" (esc)\n"):
1286 1286 if PYTHON3:
1287 1287 el = el[:-7].decode('unicode_escape') + '\n'
1288 1288 el = el.encode('utf-8')
1289 1289 else:
1290 1290 el = el[:-7].decode('string-escape') + '\n'
1291 1291 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1292 1292 return True
1293 1293 if el.endswith(b" (re)\n"):
1294 1294 return TTest.rematch(el[:-6], l) or retry
1295 1295 if el.endswith(b" (glob)\n"):
1296 1296 # ignore '(glob)' added to l by 'replacements'
1297 1297 if l.endswith(b" (glob)\n"):
1298 1298 l = l[:-8] + b"\n"
1299 1299 return TTest.globmatch(el[:-8], l)
1300 1300 if os.altsep and l.replace(b'\\', b'/') == el:
1301 1301 return b'+glob'
1302 1302 return retry
1303 1303
1304 1304 @staticmethod
1305 1305 def parsehghaveoutput(lines):
1306 1306 '''Parse hghave log lines.
1307 1307
1308 1308 Return tuple of lists (missing, failed):
1309 1309 * the missing/unknown features
1310 1310 * the features for which existence check failed'''
1311 1311 missing = []
1312 1312 failed = []
1313 1313 for line in lines:
1314 1314 if line.startswith(TTest.SKIPPED_PREFIX):
1315 1315 line = line.splitlines()[0]
1316 1316 missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8'))
1317 1317 elif line.startswith(TTest.FAILED_PREFIX):
1318 1318 line = line.splitlines()[0]
1319 1319 failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8'))
1320 1320
1321 1321 return missing, failed
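# Example (for illustration): if hghave output contains a line such as
# b'skipped: missing feature: foo', the parser above yields
# missing == ['missing feature: foo'] and leaves failed empty.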
1322 1322
1323 1323 @staticmethod
1324 1324 def _escapef(m):
1325 1325 return TTest.ESCAPEMAP[m.group(0)]
1326 1326
1327 1327 @staticmethod
1328 1328 def _stringescape(s):
1329 1329 return TTest.ESCAPESUB(TTest._escapef, s)
1330 1330
1331 1331 iolock = threading.RLock()
1332 1332
1333 1333 class SkipTest(Exception):
1334 1334 """Raised to indicate that a test is to be skipped."""
1335 1335
1336 1336 class IgnoreTest(Exception):
1337 1337 """Raised to indicate that a test is to be ignored."""
1338 1338
1339 1339 class WarnTest(Exception):
1340 1340 """Raised to indicate that a test warned."""
1341 1341
1342 1342 class ReportedTest(Exception):
1343 1343 """Raised to indicate that a test already reported."""
1344 1344
1345 1345 class TestResult(unittest._TextTestResult):
1346 1346 """Holds results when executing via unittest."""
1347 1347 # Don't worry too much about accessing the non-public _TextTestResult.
1348 1348 # It is relatively common in Python testing tools.
1349 1349 def __init__(self, options, *args, **kwargs):
1350 1350 super(TestResult, self).__init__(*args, **kwargs)
1351 1351
1352 1352 self._options = options
1353 1353
1354 1354 # unittest.TestResult didn't have skipped until 2.7. We need to
1355 1355 # polyfill it.
1356 1356 self.skipped = []
1357 1357
1358 1358 # We have a custom "ignored" result that isn't present in any Python
1359 1359 # unittest implementation. It is very similar to skipped. It may make
1360 1360 # sense to map it into skip some day.
1361 1361 self.ignored = []
1362 1362
1363 1363 # We have a custom "warned" result that isn't present in any Python
1364 1364 # unittest implementation. It is very similar to failed. It may make
1365 1365 # sense to map it into fail some day.
1366 1366 self.warned = []
1367 1367
1368 1368 self.times = []
1369 1369 self._firststarttime = None
1370 1370 # Data stored for the benefit of generating xunit reports.
1371 1371 self.successes = []
1372 1372 self.faildata = {}
1373 1373
1374 1374 def addFailure(self, test, reason):
1375 1375 self.failures.append((test, reason))
1376 1376
1377 1377 if self._options.first:
1378 1378 self.stop()
1379 1379 else:
1380 1380 with iolock:
1381 1381 if reason == "timed out":
1382 1382 self.stream.write('t')
1383 1383 else:
1384 1384 if not self._options.nodiff:
1385 1385 self.stream.write('\nERROR: %s output changed\n' % test)
1386 1386 self.stream.write('!')
1387 1387
1388 1388 self.stream.flush()
1389 1389
1390 1390 def addSuccess(self, test):
1391 1391 with iolock:
1392 1392 super(TestResult, self).addSuccess(test)
1393 1393 self.successes.append(test)
1394 1394
1395 1395 def addError(self, test, err):
1396 1396 super(TestResult, self).addError(test, err)
1397 1397 if self._options.first:
1398 1398 self.stop()
1399 1399
1400 1400 # Polyfill.
1401 1401 def addSkip(self, test, reason):
1402 1402 self.skipped.append((test, reason))
1403 1403 with iolock:
1404 1404 if self.showAll:
1405 1405 self.stream.writeln('skipped %s' % reason)
1406 1406 else:
1407 1407 self.stream.write('s')
1408 1408 self.stream.flush()
1409 1409
1410 1410 def addIgnore(self, test, reason):
1411 1411 self.ignored.append((test, reason))
1412 1412 with iolock:
1413 1413 if self.showAll:
1414 1414 self.stream.writeln('ignored %s' % reason)
1415 1415 else:
1416 1416 if reason not in ('not retesting', "doesn't match keyword"):
1417 1417 self.stream.write('i')
1418 1418 else:
1419 1419 self.testsRun += 1
1420 1420 self.stream.flush()
1421 1421
1422 1422 def addWarn(self, test, reason):
1423 1423 self.warned.append((test, reason))
1424 1424
1425 1425 if self._options.first:
1426 1426 self.stop()
1427 1427
1428 1428 with iolock:
1429 1429 if self.showAll:
1430 1430 self.stream.writeln('warned %s' % reason)
1431 1431 else:
1432 1432 self.stream.write('~')
1433 1433 self.stream.flush()
1434 1434
1435 1435 def addOutputMismatch(self, test, ret, got, expected):
1436 1436 """Record a mismatch in test output for a particular test."""
1437 1437 if self.shouldStop:
1438 1438 # don't print, some other test case already failed and
1439 1439 # printed, we're just stale and probably failed due to our
1440 1440 # temp dir getting cleaned up.
1441 1441 return
1442 1442
1443 1443 accepted = False
1444 1444 lines = []
1445 1445
1446 1446 with iolock:
1447 1447 if self._options.nodiff:
1448 1448 pass
1449 1449 elif self._options.view:
1450 1450 v = self._options.view
1451 1451 if PYTHON3:
1452 1452 v = _bytespath(v)
1453 1453 os.system(b"%s %s %s" %
1454 1454 (v, test.refpath, test.errpath))
1455 1455 else:
1456 1456 servefail, lines = getdiff(expected, got,
1457 1457 test.refpath, test.errpath)
1458 1458 if servefail:
1459 1459 self.addFailure(
1460 1460 test,
1461 1461 'server failed to start (HGPORT=%s)' % test._startport)
1462 1462 raise ReportedTest('server failed to start')
1463 1463 else:
1464 1464 self.stream.write('\n')
1465 1465 for line in lines:
1466 1466 if PYTHON3:
1467 1467 self.stream.flush()
1468 1468 self.stream.buffer.write(line)
1469 1469 self.stream.buffer.flush()
1470 1470 else:
1471 1471 self.stream.write(line)
1472 1472 self.stream.flush()
1473 1473
1474 1474 # handle interactive prompt without releasing iolock
1475 1475 if self._options.interactive:
1476 1476 self.stream.write('Accept this change? [n] ')
1477 1477 answer = sys.stdin.readline().strip()
1478 1478 if answer.lower() in ('y', 'yes'):
1479 1479 if test.name.endswith('.t'):
1480 1480 rename(test.errpath, test.path)
1481 1481 else:
1482 1482 rename(test.errpath, '%s.out' % test.path)
1483 1483 accepted = True
1484 1484 if not accepted:
1485 1485 self.faildata[test.name] = b''.join(lines)
1486 1486
1487 1487 return accepted
1488 1488
1489 1489 def startTest(self, test):
1490 1490 super(TestResult, self).startTest(test)
1491 1491
1492 1492 # os.times() returns the user and system CPU time consumed by this
1493 1493 # process and its children, along with the elapsed real time.
1494 1494 # Its main limitation is that the child and elapsed-time fields are
1495 1495 # only meaningful on Unix-like systems, not on Windows.
1496 1496 test.started = os.times()
1497 1497 if self._firststarttime is None: # thread racy but irrelevant
1498 1498 self._firststarttime = test.started[4]
1499 1499
1500 1500 def stopTest(self, test, interrupted=False):
1501 1501 super(TestResult, self).stopTest(test)
1502 1502
1503 1503 test.stopped = os.times()
1504 1504
1505 1505 starttime = test.started
1506 1506 endtime = test.stopped
1507 1507 origin = self._firststarttime
1508 1508 self.times.append((test.name,
1509 1509 endtime[2] - starttime[2], # user space CPU time
1510 1510 endtime[3] - starttime[3], # sys space CPU time
1511 1511 endtime[4] - starttime[4], # real time
1512 1512 starttime[4] - origin, # start date in run context
1513 1513 endtime[4] - origin, # end date in run context
1514 1514 ))
1515 1515
1516 1516 if interrupted:
1517 1517 with iolock:
1518 1518 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1519 1519 test.name, self.times[-1][3]))
1520 1520
1521 1521 class TestSuite(unittest.TestSuite):
1522 1522 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1523 1523
1524 1524 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1525 1525 retest=False, keywords=None, loop=False, runs_per_test=1,
1526 1526 loadtest=None, showchannels=False,
1527 1527 *args, **kwargs):
1528 1528 """Create a new instance that can run tests with a configuration.
1529 1529
1530 1530 testdir specifies the directory where tests are executed from. This
1531 1531 is typically the ``tests`` directory from Mercurial's source
1532 1532 repository.
1533 1533
1534 1534 jobs specifies the number of jobs to run concurrently. Each test
1535 1535 executes on its own thread. Tests actually spawn new processes, so
1536 1536 state mutation should not be an issue.
1537 1537
1538 1538 If there is only one job, it will use the main thread.
1539 1539
1540 1540 whitelist and blacklist denote tests that have been whitelisted and
1541 1541 blacklisted, respectively. These arguments don't belong in TestSuite.
1542 1542 Instead, whitelist and blacklist should be handled by the thing that
1543 1543 populates the TestSuite with tests. They are present to preserve
1544 1544 backwards compatible behavior which reports skipped tests as part
1545 1545 of the results.
1546 1546
1547 1547 retest denotes whether to retest failed tests. This arguably belongs
1548 1548 outside of TestSuite.
1549 1549
1550 1550 keywords denotes key words that will be used to filter which tests
1551 1551 to execute. This arguably belongs outside of TestSuite.
1552 1552
1553 1553 loop denotes whether to loop over tests forever.
1554 1554 """
1555 1555 super(TestSuite, self).__init__(*args, **kwargs)
1556 1556
1557 1557 self._jobs = jobs
1558 1558 self._whitelist = whitelist
1559 1559 self._blacklist = blacklist
1560 1560 self._retest = retest
1561 1561 self._keywords = keywords
1562 1562 self._loop = loop
1563 1563 self._runs_per_test = runs_per_test
1564 1564 self._loadtest = loadtest
1565 1565 self._showchannels = showchannels
1566 1566
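# A minimal construction sketch (hypothetical values; the real caller is
# TestRunner._runtests further below):
#
#   suite = TestSuite(b'/path/to/tests', jobs=2, keywords=b'clone',
#                     tests=[...], loadtest=runner._gettest)
#   TextTestRunner(runner).run(suite)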
1567 1567 def run(self, result):
1568 1568 # We have a number of filters that need to be applied. We do this
1569 1569 # here instead of inside Test because it makes the running logic for
1570 1570 # Test simpler.
1571 1571 tests = []
1572 1572 num_tests = [0]
1573 1573 for test in self._tests:
1574 1574 def get():
1575 1575 num_tests[0] += 1
1576 1576 if getattr(test, 'should_reload', False):
1577 1577 return self._loadtest(test.path, num_tests[0])
1578 1578 return test
1579 1579 if not os.path.exists(test.path):
1580 1580 result.addSkip(test, "Doesn't exist")
1581 1581 continue
1582 1582
1583 1583 if not (self._whitelist and test.name in self._whitelist):
1584 1584 if self._blacklist and test.bname in self._blacklist:
1585 1585 result.addSkip(test, 'blacklisted')
1586 1586 continue
1587 1587
1588 1588 if self._retest and not os.path.exists(test.errpath):
1589 1589 result.addIgnore(test, 'not retesting')
1590 1590 continue
1591 1591
1592 1592 if self._keywords:
1593 1593 f = open(test.path, 'rb')
1594 1594 t = f.read().lower() + test.bname.lower()
1595 1595 f.close()
1596 1596 ignored = False
1597 1597 for k in self._keywords.lower().split():
1598 1598 if k not in t:
1599 1599 result.addIgnore(test, "doesn't match keyword")
1600 1600 ignored = True
1601 1601 break
1602 1602
1603 1603 if ignored:
1604 1604 continue
1605 1605 for _ in xrange(self._runs_per_test):
1606 1606 tests.append(get())
1607 1607
1608 1608 runtests = list(tests)
1609 1609 done = queue.Queue()
1610 1610 running = 0
1611 1611
1612 1612 channels = [""] * self._jobs
1613 1613
1614 1614 def job(test, result):
1615 1615 for n, v in enumerate(channels):
1616 1616 if not v:
1617 1617 channel = n
1618 1618 break
1619 1619 channels[channel] = "=" + test.name[5:].split(".")[0]
1620 1620 try:
1621 1621 test(result)
1622 1622 done.put(None)
1623 1623 except KeyboardInterrupt:
1624 1624 pass
1625 1625 except: # re-raises
1626 1626 done.put(('!', test, 'run-test raised an error, see traceback'))
1627 1627 raise
1628 1628 try:
1629 1629 channels[channel] = ''
1630 1630 except IndexError:
1631 1631 pass
1632 1632
1633 1633 def stat():
1634 1634 count = 0
1635 1635 while channels:
1636 1636 d = '\n%03s ' % count
1637 1637 for n, v in enumerate(channels):
1638 1638 if v:
1639 1639 d += v[0]
1640 1640 channels[n] = v[1:] or '.'
1641 1641 else:
1642 1642 d += ' '
1643 1643 d += ' '
1644 1644 with iolock:
1645 1645 sys.stdout.write(d + ' ')
1646 1646 sys.stdout.flush()
1647 1647 for x in xrange(10):
1648 1648 if channels:
1649 1649 time.sleep(.1)
1650 1650 count += 1
1651 1651
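# With --showchannels, this thread prints a status line roughly once a
# second: a tick counter followed by one column per job slot. A running
# test's name is spelled out one character per tick (starting with '=',
# falling back to '.' once the name is exhausted); an idle slot prints
# a blank column.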
1652 1652 stoppedearly = False
1653 1653
1654 1654 if self._showchannels:
1655 1655 statthread = threading.Thread(target=stat, name="stat")
1656 1656 statthread.start()
1657 1657
1658 1658 try:
1659 1659 while tests or running:
1660 1660 if not done.empty() or running == self._jobs or not tests:
1661 1661 try:
1662 1662 done.get(True, 1)
1663 1663 running -= 1
1664 1664 if result and result.shouldStop:
1665 1665 stoppedearly = True
1666 1666 break
1667 1667 except queue.Empty:
1668 1668 continue
1669 1669 if tests and not running == self._jobs:
1670 1670 test = tests.pop(0)
1671 1671 if self._loop:
1672 1672 if getattr(test, 'should_reload', False):
1673 1673 num_tests[0] += 1
1674 1674 tests.append(
1675 1675 self._loadtest(test.name, num_tests[0]))
1676 1676 else:
1677 1677 tests.append(test)
1678 1678 if self._jobs == 1:
1679 1679 job(test, result)
1680 1680 else:
1681 1681 t = threading.Thread(target=job, name=test.name,
1682 1682 args=(test, result))
1683 1683 t.start()
1684 1684 running += 1
1685 1685
1686 1686 # If we stop early we still need to wait on started tests to
1687 1687 # finish. Otherwise, there is a race between the test completing
1688 1688 # and the test's cleanup code running. This could result in the
1689 1689 # test reporting incorrect results.
1690 1690 if stoppedearly:
1691 1691 while running:
1692 1692 try:
1693 1693 done.get(True, 1)
1694 1694 running -= 1
1695 1695 except queue.Empty:
1696 1696 continue
1697 1697 except KeyboardInterrupt:
1698 1698 for test in runtests:
1699 1699 test.abort()
1700 1700
1701 1701 channels = []
1702 1702
1703 1703 return result
1704 1704
1705 1705 # Save the most recent 5 wall-clock runtimes of each test to a
1706 1706 # human-readable text file named .testtimes. Tests are sorted
1707 1707 # alphabetically, while times for each test are listed from oldest to
1708 1708 # newest.
1709 1709
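# Illustrative .testtimes content (format: test name followed by up to
# five runtimes in seconds, oldest first):
#   test-commit.t 1.234 1.198 1.301
#   test-log.t 0.512 0.498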
1710 1710 def loadtimes(testdir):
1711 1711 times = []
1712 1712 try:
1713 1713 with open(os.path.join(testdir, b'.testtimes')) as fp:
1714 1714 for line in fp:
1715 1715 ts = line.split()
1716 1716 times.append((ts[0], [float(t) for t in ts[1:]]))
1717 1717 except IOError as err:
1718 1718 if err.errno != errno.ENOENT:
1719 1719 raise
1720 1720 return times
1721 1721
1722 1722 def savetimes(testdir, result):
1723 1723 saved = dict(loadtimes(testdir))
1724 1724 maxruns = 5
1725 1725 skipped = set([str(t[0]) for t in result.skipped])
1726 1726 for tdata in result.times:
1727 1727 test, real = tdata[0], tdata[3]
1728 1728 if test not in skipped:
1729 1729 ts = saved.setdefault(test, [])
1730 1730 ts.append(real)
1731 1731 ts[:] = ts[-maxruns:]
1732 1732
1733 1733 fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes',
1734 1734 dir=testdir, text=True)
1735 1735 with os.fdopen(fd, 'w') as fp:
1736 1736 for name, ts in sorted(saved.items()):
1737 1737 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1738 1738 timepath = os.path.join(testdir, b'.testtimes')
1739 1739 try:
1740 1740 os.unlink(timepath)
1741 1741 except OSError:
1742 1742 pass
1743 1743 try:
1744 1744 os.rename(tmpname, timepath)
1745 1745 except OSError:
1746 1746 pass
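# (savetimes writes to a mkstemp() temporary and then renames it over
# .testtimes so an interrupted run never leaves a partially written file;
# the unlink also lets the rename succeed on Windows, where renaming over
# an existing file fails, and both steps are allowed to fail silently if
# concurrent runners race in the same directory.)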
1747 1747
1748 1748 class TextTestRunner(unittest.TextTestRunner):
1749 1749 """Custom unittest test runner that uses appropriate settings."""
1750 1750
1751 1751 def __init__(self, runner, *args, **kwargs):
1752 1752 super(TextTestRunner, self).__init__(*args, **kwargs)
1753 1753
1754 1754 self._runner = runner
1755 1755
1756 1756 def run(self, test):
1757 1757 result = TestResult(self._runner.options, self.stream,
1758 1758 self.descriptions, self.verbosity)
1759 1759
1760 1760 test(result)
1761 1761
1762 1762 failed = len(result.failures)
1763 1763 warned = len(result.warned)
1764 1764 skipped = len(result.skipped)
1765 1765 ignored = len(result.ignored)
1766 1766
1767 1767 with iolock:
1768 1768 self.stream.writeln('')
1769 1769
1770 1770 if not self._runner.options.noskips:
1771 1771 for test, msg in result.skipped:
1772 1772 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1773 1773 for test, msg in result.warned:
1774 1774 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1775 1775 for test, msg in result.failures:
1776 1776 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1777 1777 for test, msg in result.errors:
1778 1778 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1779 1779
1780 1780 if self._runner.options.xunit:
1781 1781 with open(self._runner.options.xunit, 'wb') as xuf:
1782 1782 timesd = dict((t[0], t[3]) for t in result.times)
1783 1783 doc = minidom.Document()
1784 1784 s = doc.createElement('testsuite')
1785 1785 s.setAttribute('name', 'run-tests')
1786 1786 s.setAttribute('tests', str(result.testsRun))
1787 1787 s.setAttribute('errors', "0") # TODO
1788 1788 s.setAttribute('failures', str(failed))
1789 1789 s.setAttribute('skipped', str(skipped + ignored))
1790 1790 doc.appendChild(s)
1791 1791 for tc in result.successes:
1792 1792 t = doc.createElement('testcase')
1793 1793 t.setAttribute('name', tc.name)
1794 1794 t.setAttribute('time', '%.3f' % timesd[tc.name])
1795 1795 s.appendChild(t)
1796 1796 for tc, err in sorted(result.faildata.items()):
1797 1797 t = doc.createElement('testcase')
1798 1798 t.setAttribute('name', tc)
1799 1799 t.setAttribute('time', '%.3f' % timesd[tc])
1800 1800 # createCDATASection expects a unicode or it will
1801 1801 # convert using default conversion rules, which will
1802 1802 # fail if string isn't ASCII.
1803 1803 err = cdatasafe(err).decode('utf-8', 'replace')
1804 1804 cd = doc.createCDATASection(err)
1805 1805 t.appendChild(cd)
1806 1806 s.appendChild(t)
1807 1807 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1808 1808
1809 1809 if self._runner.options.json:
1810 1810 jsonpath = os.path.join(self._runner._testdir, b'report.json')
1811 1811 with open(jsonpath, 'w') as fp:
1812 1812 timesd = {}
1813 1813 for tdata in result.times:
1814 1814 test = tdata[0]
1815 1815 timesd[test] = tdata[1:]
1816 1816
1817 1817 outcome = {}
1818 1818 groups = [('success', ((tc, None)
1819 1819 for tc in result.successes)),
1820 1820 ('failure', result.failures),
1821 1821 ('skip', result.skipped)]
1822 1822 for res, testcases in groups:
1823 1823 for tc, __ in testcases:
1824 1824 if tc.name in timesd:
1825 1825 diff = result.faildata.get(tc.name, b'')
1826 1826 tres = {'result': res,
1827 1827 'time': ('%0.3f' % timesd[tc.name][2]),
1828 1828 'cuser': ('%0.3f' % timesd[tc.name][0]),
1829 1829 'csys': ('%0.3f' % timesd[tc.name][1]),
1830 1830 'start': ('%0.3f' % timesd[tc.name][3]),
1831 1831 'end': ('%0.3f' % timesd[tc.name][4]),
1832 1832 'diff': diff.decode('unicode_escape'),
1833 1833 }
1834 1834 else:
1835 1835 # blacklisted test
1836 1836 tres = {'result': res}
1837 1837
1838 1838 outcome[tc.name] = tres
1839 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1839 out = json.dumps(outcome, sort_keys=True, indent=4)
1840 jsonout = '\n'.join([l.rstrip() for l in out.splitlines()])
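# (The per-line rstrip papers over a json.dumps() divergence: with
# indent=4, older Pythons use ', ' as the item separator and so emit
# trailing spaces at line ends, while Python 3.4+ defaults to ','.
# Roughly, for an illustrative two-key dict:
#   2.7:  '{\n    "a": 1, \n    "b": 2\n}'   (note the trailing space)
#   3.5:  '{\n    "a": 1,\n    "b": 2\n}'
# Stripping each line keeps report.json identical across versions.)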
1840 1841 fp.writelines(("testreport =", jsonout))
1841 1842
1842 1843 self._runner._checkhglib('Tested')
1843 1844
1844 1845 savetimes(self._runner._testdir, result)
1845 1846
1846 1847 if failed and self._runner.options.known_good_rev:
1847 1848 def nooutput(args):
1848 1849 p = subprocess.Popen(args, stderr=subprocess.STDOUT,
1849 1850 stdout=subprocess.PIPE)
1850 1851 p.stdout.read()
1851 1852 p.wait()
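# For every failure, bisect between the known-good revision and the
# working-directory parent ('.'), re-running just the failing test at
# each step via `hg bisect --command`, then report which changeset
# broke (or fixed) it.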
1852 1853 for test, msg in result.failures:
1853 1854 nooutput(['hg', 'bisect', '--reset'])
1854 1855 nooutput(['hg', 'bisect', '--bad', '.'])
1855 1856 nooutput(['hg', 'bisect', '--good',
1856 1857 self._runner.options.known_good_rev])
1857 1858 # TODO: we probably need to forward some options
1858 1859 # that alter hg's behavior inside the tests.
1859 1860 rtc = '%s %s %s' % (sys.executable, sys.argv[0], test)
1860 1861 sub = subprocess.Popen(['hg', 'bisect', '--command', rtc],
1861 1862 stderr=subprocess.STDOUT,
1862 1863 stdout=subprocess.PIPE)
1863 1864 data = sub.stdout.read()
1864 1865 sub.wait()
1865 1866 m = re.search(
1866 1867 (r'\nThe first (?P<goodbad>bad|good) revision '
1867 1868 r'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
1868 1869 r'summary: +(?P<summary>[^\n]+)\n'),
1869 1870 data, (re.MULTILINE | re.DOTALL))
1870 1871 if m is None:
1871 1872 self.stream.writeln(
1872 1873 'Failed to identify failure point for %s' % test)
1873 1874 continue
1874 1875 dat = m.groupdict()
1875 1876 verb = 'broken' if dat['goodbad'] == 'bad' else 'fixed'
1876 1877 self.stream.writeln(
1877 1878 '%s %s by %s (%s)' % (
1878 1879 test, verb, dat['node'], dat['summary']))
1879 1880 self.stream.writeln(
1880 1881 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1881 1882 % (result.testsRun,
1882 1883 skipped + ignored, warned, failed))
1883 1884 if failed:
1884 1885 self.stream.writeln('python hash seed: %s' %
1885 1886 os.environ['PYTHONHASHSEED'])
1886 1887 if self._runner.options.time:
1887 1888 self.printtimes(result.times)
1888 1889
1889 1890 return result
1890 1891
1891 1892 def printtimes(self, times):
1892 1893 # iolock held by run
1893 1894 self.stream.writeln('# Producing time report')
1894 1895 times.sort(key=lambda t: (t[3]))
1895 1896 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1896 1897 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1897 1898 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1898 1899 for tdata in times:
1899 1900 test = tdata[0]
1900 1901 cuser, csys, real, start, end = tdata[1:6]
1901 1902 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1902 1903
1903 1904 class TestRunner(object):
1904 1905 """Holds context for executing tests.
1905 1906
1906 1907 Tests rely on a lot of state. This object holds it for them.
1907 1908 """
1908 1909
1909 1910 # Programs required to run tests.
1910 1911 REQUIREDTOOLS = [
1911 1912 os.path.basename(_bytespath(sys.executable)),
1912 1913 b'diff',
1913 1914 b'grep',
1914 1915 b'unzip',
1915 1916 b'gunzip',
1916 1917 b'bunzip2',
1917 1918 b'sed',
1918 1919 ]
1919 1920
1920 1921 # Maps file extensions to test class.
1921 1922 TESTTYPES = [
1922 1923 (b'.py', PythonTest),
1923 1924 (b'.t', TTest),
1924 1925 ]
1925 1926
1926 1927 def __init__(self):
1927 1928 self.options = None
1928 1929 self._hgroot = None
1929 1930 self._testdir = None
1930 1931 self._hgtmp = None
1931 1932 self._installdir = None
1932 1933 self._bindir = None
1933 1934 self._tmpbinddir = None
1934 1935 self._pythondir = None
1935 1936 self._coveragefile = None
1936 1937 self._createdfiles = []
1937 1938 self._hgcommand = None
1938 1939 self._hgpath = None
1939 1940 self._portoffset = 0
1940 1941 self._ports = {}
1941 1942
1942 1943 def run(self, args, parser=None):
1943 1944 """Run the test suite."""
1944 1945 oldmask = os.umask(0o22)
1945 1946 try:
1946 1947 parser = parser or getparser()
1947 1948 options, args = parseargs(args, parser)
1948 1949 # positional arguments are paths to test files to run, so
1949 1950 # we make sure they're all bytestrings
1950 1951 args = [_bytespath(a) for a in args]
1951 1952 self.options = options
1952 1953
1953 1954 self._checktools()
1954 1955 tests = self.findtests(args)
1955 1956 if options.profile_runner:
1956 1957 import statprof
1957 1958 statprof.start()
1958 1959 result = self._run(tests)
1959 1960 if options.profile_runner:
1960 1961 statprof.stop()
1961 1962 statprof.display()
1962 1963 return result
1963 1964
1964 1965 finally:
1965 1966 os.umask(oldmask)
1966 1967
1967 1968 def _run(self, tests):
1968 1969 if self.options.random:
1969 1970 random.shuffle(tests)
1970 1971 else:
1971 1972 # keywords for slow tests
1972 1973 slow = {b'svn': 10,
1973 1974 b'cvs': 10,
1974 1975 b'hghave': 10,
1975 1976 b'largefiles-update': 10,
1976 1977 b'run-tests': 10,
1977 1978 b'corruption': 10,
1978 1979 b'race': 10,
1979 1980 b'i18n': 10,
1980 1981 b'check': 100,
1981 1982 b'gendoc': 100,
1982 1983 b'contrib-perf': 200,
1983 1984 }
1984 1985 perf = {}
1985 1986 def sortkey(f):
1986 1987 # run largest tests first, as they tend to take the longest
1987 1988 try:
1988 1989 return perf[f]
1989 1990 except KeyError:
1990 1991 try:
1991 1992 val = -os.stat(f).st_size
1992 1993 except OSError as e:
1993 1994 if e.errno != errno.ENOENT:
1994 1995 raise
1995 1996 perf[f] = -1e9 # file does not exist, tell early
1996 1997 return -1e9
1997 1998 for kw, mul in slow.items():
1998 1999 if kw in f:
1999 2000 val *= mul
2000 2001 if f.endswith(b'.py'):
2001 2002 val /= 10.0
2002 2003 perf[f] = val / 1000.0
2003 2004 return perf[f]
2004 2005 tests.sort(key=sortkey)
2005 2006
2006 2007 self._testdir = osenvironb[b'TESTDIR'] = getattr(
2007 2008 os, 'getcwdb', os.getcwd)()
2008 2009
2009 2010 if 'PYTHONHASHSEED' not in os.environ:
2010 2011 # use a random python hash seed all the time
2011 2012 # we do the randomness ourselves so we know which seed was used
2012 2013 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
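# (The chosen seed is echoed as "python hash seed: ..." after a failed
# run, so a failure that depends on hash randomization can be reproduced
# by exporting the same PYTHONHASHSEED before re-running.)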
2013 2014
2014 2015 if self.options.tmpdir:
2015 2016 self.options.keep_tmpdir = True
2016 2017 tmpdir = _bytespath(self.options.tmpdir)
2017 2018 if os.path.exists(tmpdir):
2018 2019 # Meaning of tmpdir has changed since 1.3: we used to create
2019 2020 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
2020 2021 # tmpdir already exists.
2021 2022 print("error: temp dir %r already exists" % tmpdir)
2022 2023 return 1
2023 2024
2024 2025 # Automatically removing tmpdir sounds convenient, but could
2025 2026 # really annoy anyone in the habit of using "--tmpdir=/tmp"
2026 2027 # or "--tmpdir=$HOME".
2027 2028 #vlog("# Removing temp dir", tmpdir)
2028 2029 #shutil.rmtree(tmpdir)
2029 2030 os.makedirs(tmpdir)
2030 2031 else:
2031 2032 d = None
2032 2033 if os.name == 'nt':
2033 2034 # without this, we get the default temp dir location, but
2034 2035 # in all lowercase, which causes trouble with paths (issue3490)
2035 2036 d = osenvironb.get(b'TMP', None)
2036 2037 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
2037 2038
2038 2039 self._hgtmp = osenvironb[b'HGTMP'] = (
2039 2040 os.path.realpath(tmpdir))
2040 2041
2041 2042 if self.options.with_hg:
2042 2043 self._installdir = None
2043 2044 whg = self.options.with_hg
2044 2045 self._bindir = os.path.dirname(os.path.realpath(whg))
2045 2046 assert isinstance(self._bindir, bytes)
2046 2047 self._hgcommand = os.path.basename(whg)
2047 2048 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
2048 2049 os.makedirs(self._tmpbindir)
2049 2050
2050 2051 # This looks redundant with how Python initializes sys.path from
2051 2052 # the location of the script being executed. Needed because the
2052 2053 # "hg" specified by --with-hg is not the only Python script
2053 2054 # executed in the test suite that needs to import 'mercurial'
2054 2055 # ... which means it's not really redundant at all.
2055 2056 self._pythondir = self._bindir
2056 2057 else:
2057 2058 self._installdir = os.path.join(self._hgtmp, b"install")
2058 2059 self._bindir = os.path.join(self._installdir, b"bin")
2059 2060 self._hgcommand = b'hg'
2060 2061 self._tmpbindir = self._bindir
2061 2062 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
2062 2063
2063 2064 # set CHGHG, then replace "hg" command by "chg"
2064 2065 chgbindir = self._bindir
2065 2066 if self.options.chg or self.options.with_chg:
2066 2067 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
2067 2068 else:
2068 2069 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
2069 2070 if self.options.chg:
2070 2071 self._hgcommand = b'chg'
2071 2072 elif self.options.with_chg:
2072 2073 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
2073 2074 self._hgcommand = os.path.basename(self.options.with_chg)
2074 2075
2075 2076 osenvironb[b"BINDIR"] = self._bindir
2076 2077 osenvironb[b"PYTHON"] = PYTHON
2077 2078
2078 2079 if self.options.with_python3:
2079 2080 osenvironb[b'PYTHON3'] = self.options.with_python3
2080 2081
2081 2082 fileb = _bytespath(__file__)
2082 2083 runtestdir = os.path.abspath(os.path.dirname(fileb))
2083 2084 osenvironb[b'RUNTESTDIR'] = runtestdir
2084 2085 if PYTHON3:
2085 2086 sepb = _bytespath(os.pathsep)
2086 2087 else:
2087 2088 sepb = os.pathsep
2088 2089 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
2089 2090 if os.path.islink(__file__):
2090 2091 # test helper will likely be at the end of the symlink
2091 2092 realfile = os.path.realpath(fileb)
2092 2093 realdir = os.path.abspath(os.path.dirname(realfile))
2093 2094 path.insert(2, realdir)
2094 2095 if chgbindir != self._bindir:
2095 2096 path.insert(1, chgbindir)
2096 2097 if self._testdir != runtestdir:
2097 2098 path = [self._testdir] + path
2098 2099 if self._tmpbindir != self._bindir:
2099 2100 path = [self._tmpbindir] + path
2100 2101 osenvironb[b"PATH"] = sepb.join(path)
2101 2102
2102 2103 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
2103 2104 # can run .../tests/run-tests.py test-foo where test-foo
2104 2105 # adds an extension to HGRC. Also include the run-tests.py directory to
2105 2106 # import modules like heredoctest.
2106 2107 pypath = [self._pythondir, self._testdir, runtestdir]
2107 2108 # We have to augment PYTHONPATH, rather than simply replacing
2108 2109 # it, in case external libraries are only available via current
2109 2110 # PYTHONPATH. (In particular, the Subversion bindings on OS X
2110 2111 # are in /opt/subversion.)
2111 2112 oldpypath = osenvironb.get(IMPL_PATH)
2112 2113 if oldpypath:
2113 2114 pypath.append(oldpypath)
2114 2115 osenvironb[IMPL_PATH] = sepb.join(pypath)
2115 2116
2116 2117 if self.options.pure:
2117 2118 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
2118 2119 os.environ["HGMODULEPOLICY"] = "py"
2119 2120
2120 2121 if self.options.allow_slow_tests:
2121 2122 os.environ["HGTEST_SLOW"] = "slow"
2122 2123 elif 'HGTEST_SLOW' in os.environ:
2123 2124 del os.environ['HGTEST_SLOW']
2124 2125
2125 2126 self._coveragefile = os.path.join(self._testdir, b'.coverage')
2126 2127
2127 2128 vlog("# Using TESTDIR", self._testdir)
2128 2129 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
2129 2130 vlog("# Using HGTMP", self._hgtmp)
2130 2131 vlog("# Using PATH", os.environ["PATH"])
2131 2132 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
2132 2133
2133 2134 try:
2134 2135 return self._runtests(tests) or 0
2135 2136 finally:
2136 2137 time.sleep(.1)
2137 2138 self._cleanup()
2138 2139
2139 2140 def findtests(self, args):
2140 2141 """Finds possible test files from arguments.
2141 2142
2142 2143 If you wish to inject custom tests into the test harness, this would
2143 2144 be a good function to monkeypatch or override in a derived class.
2144 2145 """
2145 2146 if not args:
2146 2147 if self.options.changed:
2147 2148 proc = Popen4('hg st --rev "%s" -man0 .' %
2148 2149 self.options.changed, None, 0)
2149 2150 stdout, stderr = proc.communicate()
2150 2151 args = stdout.strip(b'\0').split(b'\0')
2151 2152 else:
2152 2153 args = os.listdir(b'.')
2153 2154
2154 2155 return [t for t in args
2155 2156 if os.path.basename(t).startswith(b'test-')
2156 2157 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2157 2158
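# A minimal override sketch (hypothetical subclass, not part of this
# file), as suggested by the docstring above:
#
#   class MyRunner(TestRunner):
#       def findtests(self, args):
#           tests = super(MyRunner, self).findtests(args)
#           return tests + [b'test-custom.t']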
2158 2159 def _runtests(self, tests):
2159 2160 try:
2160 2161 if self._installdir:
2161 2162 self._installhg()
2162 2163 self._checkhglib("Testing")
2163 2164 else:
2164 2165 self._usecorrectpython()
2165 2166 if self.options.chg:
2166 2167 assert self._installdir
2167 2168 self._installchg()
2168 2169
2169 2170 if self.options.restart:
2170 2171 orig = list(tests)
2171 2172 while tests:
2172 2173 if os.path.exists(tests[0] + ".err"):
2173 2174 break
2174 2175 tests.pop(0)
2175 2176 if not tests:
2176 2177 print("running all tests")
2177 2178 tests = orig
2178 2179
2179 2180 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2180 2181
2181 2182 failed = False
2182 2183 warned = False
2183 2184 kws = self.options.keywords
2184 2185 if kws is not None and PYTHON3:
2185 2186 kws = kws.encode('utf-8')
2186 2187
2187 2188 suite = TestSuite(self._testdir,
2188 2189 jobs=self.options.jobs,
2189 2190 whitelist=self.options.whitelisted,
2190 2191 blacklist=self.options.blacklist,
2191 2192 retest=self.options.retest,
2192 2193 keywords=kws,
2193 2194 loop=self.options.loop,
2194 2195 runs_per_test=self.options.runs_per_test,
2195 2196 showchannels=self.options.showchannels,
2196 2197 tests=tests, loadtest=self._gettest)
2197 2198 verbosity = 1
2198 2199 if self.options.verbose:
2199 2200 verbosity = 2
2200 2201 runner = TextTestRunner(self, verbosity=verbosity)
2201 2202 result = runner.run(suite)
2202 2203
2203 2204 if result.failures:
2204 2205 failed = True
2205 2206 if result.warned:
2206 2207 warned = True
2207 2208
2208 2209 if self.options.anycoverage:
2209 2210 self._outputcoverage()
2210 2211 except KeyboardInterrupt:
2211 2212 failed = True
2212 2213 print("\ninterrupted!")
2213 2214
2214 2215 if failed:
2215 2216 return 1
2216 2217 if warned:
2217 2218 return 80
2218 2219
2219 2220 def _getport(self, count):
2220 2221 port = self._ports.get(count) # do we have a cached entry?
2221 2222 if port is None:
2222 2223 portneeded = 3
2223 2224 # after 100 tries we just give up and let the test report the failure
2224 2225 for tries in xrange(100):
2225 2226 allfree = True
2226 2227 port = self.options.port + self._portoffset
2227 2228 for idx in xrange(portneeded):
2228 2229 if not checkportisavailable(port + idx):
2229 2230 allfree = False
2230 2231 break
2231 2232 self._portoffset += portneeded
2232 2233 if allfree:
2233 2234 break
2234 2235 self._ports[count] = port
2235 2236 return port
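# (Each test gets a block of three consecutive ports: with an
# illustrative base port of 20000, test 0 would get 20000-20002 and
# test 1 would get 20003-20005, assuming those ports are free; busy
# blocks are skipped and retried up to 100 times.)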
2236 2237
2237 2238 def _gettest(self, test, count):
2238 2239 """Obtain a Test by looking at its filename.
2239 2240
2240 2241 Returns a Test instance. The Test may not be runnable if it doesn't
2241 2242 map to a known type.
2242 2243 """
2243 2244 lctest = test.lower()
2244 2245 testcls = Test
2245 2246
2246 2247 for ext, cls in self.TESTTYPES:
2247 2248 if lctest.endswith(ext):
2248 2249 testcls = cls
2249 2250 break
2250 2251
2251 2252 refpath = os.path.join(self._testdir, test)
2252 2253 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2253 2254
2254 2255 t = testcls(refpath, tmpdir,
2255 2256 keeptmpdir=self.options.keep_tmpdir,
2256 2257 debug=self.options.debug,
2257 2258 timeout=self.options.timeout,
2258 2259 startport=self._getport(count),
2259 2260 extraconfigopts=self.options.extra_config_opt,
2260 2261 py3kwarnings=self.options.py3k_warnings,
2261 2262 shell=self.options.shell,
2262 2263 hgcommand=self._hgcommand,
2263 2264 usechg=bool(self.options.with_chg or self.options.chg))
2264 2265 t.should_reload = True
2265 2266 return t
2266 2267
2267 2268 def _cleanup(self):
2268 2269 """Clean up state from this test invocation."""
2269 2270 if self.options.keep_tmpdir:
2270 2271 return
2271 2272
2272 2273 vlog("# Cleaning up HGTMP", self._hgtmp)
2273 2274 shutil.rmtree(self._hgtmp, True)
2274 2275 for f in self._createdfiles:
2275 2276 try:
2276 2277 os.remove(f)
2277 2278 except OSError:
2278 2279 pass
2279 2280
2280 2281 def _usecorrectpython(self):
2281 2282 """Configure the environment to use the appropriate Python in tests."""
2282 2283 # Tests must use the same interpreter as us or bad things will happen.
2283 2284 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2284 2285 if getattr(os, 'symlink', None):
2285 2286 vlog("# Making python executable in test path a symlink to '%s'" %
2286 2287 sys.executable)
2287 2288 mypython = os.path.join(self._tmpbindir, pyexename)
2288 2289 try:
2289 2290 if os.readlink(mypython) == sys.executable:
2290 2291 return
2291 2292 os.unlink(mypython)
2292 2293 except OSError as err:
2293 2294 if err.errno != errno.ENOENT:
2294 2295 raise
2295 2296 if self._findprogram(pyexename) != sys.executable:
2296 2297 try:
2297 2298 os.symlink(sys.executable, mypython)
2298 2299 self._createdfiles.append(mypython)
2299 2300 except OSError as err:
2300 2301 # child processes may race, which is harmless
2301 2302 if err.errno != errno.EEXIST:
2302 2303 raise
2303 2304 else:
2304 2305 exedir, exename = os.path.split(sys.executable)
2305 2306 vlog("# Modifying search path to find %s as %s in '%s'" %
2306 2307 (exename, pyexename, exedir))
2307 2308 path = os.environ['PATH'].split(os.pathsep)
2308 2309 while exedir in path:
2309 2310 path.remove(exedir)
2310 2311 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2311 2312 if not self._findprogram(pyexename):
2312 2313 print("WARNING: Cannot find %s in search path" % pyexename)
2313 2314
2314 2315 def _installhg(self):
2315 2316 """Install hg into the test environment.
2316 2317
2317 2318 This will also configure hg with the appropriate testing settings.
2318 2319 """
2319 2320 vlog("# Performing temporary installation of HG")
2320 2321 installerrs = os.path.join(self._hgtmp, b"install.err")
2321 2322 compiler = ''
2322 2323 if self.options.compiler:
2323 2324 compiler = '--compiler ' + self.options.compiler
2324 2325 if self.options.pure:
2325 2326 pure = b"--pure"
2326 2327 else:
2327 2328 pure = b""
2328 2329
2329 2330 # Run installer in hg root
2330 2331 script = os.path.realpath(sys.argv[0])
2331 2332 exe = sys.executable
2332 2333 if PYTHON3:
2333 2334 compiler = _bytespath(compiler)
2334 2335 script = _bytespath(script)
2335 2336 exe = _bytespath(exe)
2336 2337 hgroot = os.path.dirname(os.path.dirname(script))
2337 2338 self._hgroot = hgroot
2338 2339 os.chdir(hgroot)
2339 2340 nohome = b'--home=""'
2340 2341 if os.name == 'nt':
2341 2342 # The --home="" trick works only on OSes where os.sep == '/'
2342 2343 # because of a distutils convert_path() fast-path. Avoid it at
2343 2344 # least on Windows for now, deal with .pydistutils.cfg bugs
2344 2345 # when they happen.
2345 2346 nohome = b''
2346 2347 cmd = (b'%(exe)s setup.py %(pure)s clean --all'
2347 2348 b' build %(compiler)s --build-base="%(base)s"'
2348 2349 b' install --force --prefix="%(prefix)s"'
2349 2350 b' --install-lib="%(libdir)s"'
2350 2351 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2351 2352 % {b'exe': exe, b'pure': pure,
2352 2353 b'compiler': compiler,
2353 2354 b'base': os.path.join(self._hgtmp, b"build"),
2354 2355 b'prefix': self._installdir, b'libdir': self._pythondir,
2355 2356 b'bindir': self._bindir,
2356 2357 b'nohome': nohome, b'logfile': installerrs})
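# Roughly what the assembled command looks like (paths illustrative,
# --pure present only with the --pure option, compiler flags omitted):
#   python setup.py --pure clean --all build --build-base="$HGTMP/build" \
#       install --force --prefix="$HGTMP/install" \
#       --install-lib="$HGTMP/install/lib/python" \
#       --install-scripts="$HGTMP/install/bin" --home="" \
#       >$HGTMP/install.err 2>&1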
2357 2358
2358 2359 # setuptools requires install directories to exist.
2359 2360 def makedirs(p):
2360 2361 try:
2361 2362 os.makedirs(p)
2362 2363 except OSError as e:
2363 2364 if e.errno != errno.EEXIST:
2364 2365 raise
2365 2366 makedirs(self._pythondir)
2366 2367 makedirs(self._bindir)
2367 2368
2368 2369 vlog("# Running", cmd)
2369 2370 if os.system(cmd) == 0:
2370 2371 if not self.options.verbose:
2371 2372 try:
2372 2373 os.remove(installerrs)
2373 2374 except OSError as e:
2374 2375 if e.errno != errno.ENOENT:
2375 2376 raise
2376 2377 else:
2377 2378 f = open(installerrs, 'rb')
2378 2379 for line in f:
2379 2380 if PYTHON3:
2380 2381 sys.stdout.buffer.write(line)
2381 2382 else:
2382 2383 sys.stdout.write(line)
2383 2384 f.close()
2384 2385 sys.exit(1)
2385 2386 os.chdir(self._testdir)
2386 2387
2387 2388 self._usecorrectpython()
2388 2389
2389 2390 if self.options.py3k_warnings and not self.options.anycoverage:
2390 2391 vlog("# Updating hg command to enable Py3k Warnings switch")
2391 2392 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2392 2393 lines = [line.rstrip() for line in f]
2393 2394 lines[0] += ' -3'
2394 2395 f.close()
2395 2396 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2396 2397 for line in lines:
2397 2398 f.write(line + '\n')
2398 2399 f.close()
2399 2400
2400 2401 hgbat = os.path.join(self._bindir, b'hg.bat')
2401 2402 if os.path.isfile(hgbat):
2402 2403 # hg.bat expects to be put in bin/scripts while run-tests.py
2403 2404 # installation layout puts it in bin/ directly. Fix it.
2404 2405 f = open(hgbat, 'rb')
2405 2406 data = f.read()
2406 2407 f.close()
2407 2408 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2408 2409 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2409 2410 b'"%~dp0python" "%~dp0hg" %*')
2410 2411 f = open(hgbat, 'wb')
2411 2412 f.write(data)
2412 2413 f.close()
2413 2414 else:
2414 2415 print('WARNING: cannot fix hg.bat reference to python.exe')
2415 2416
2416 2417 if self.options.anycoverage:
2417 2418 custom = os.path.join(self._testdir, 'sitecustomize.py')
2418 2419 target = os.path.join(self._pythondir, 'sitecustomize.py')
2419 2420 vlog('# Installing coverage trigger to %s' % target)
2420 2421 shutil.copyfile(custom, target)
2421 2422 rc = os.path.join(self._testdir, '.coveragerc')
2422 2423 vlog('# Installing coverage rc to %s' % rc)
2423 2424 os.environ['COVERAGE_PROCESS_START'] = rc
2424 2425 covdir = os.path.join(self._installdir, '..', 'coverage')
2425 2426 try:
2426 2427 os.mkdir(covdir)
2427 2428 except OSError as e:
2428 2429 if e.errno != errno.EEXIST:
2429 2430 raise
2430 2431
2431 2432 os.environ['COVERAGE_DIR'] = covdir
2432 2433
2433 2434 def _checkhglib(self, verb):
2434 2435 """Ensure that the 'mercurial' package imported by python is
2435 2436 the one we expect it to be. If not, print a warning to stderr."""
2436 2437 if ((self._bindir == self._pythondir) and
2437 2438 (self._bindir != self._tmpbindir)):
2438 2439 # The pythondir has been inferred from --with-hg flag.
2439 2440 # We cannot expect anything sensible here.
2440 2441 return
2441 2442 expecthg = os.path.join(self._pythondir, b'mercurial')
2442 2443 actualhg = self._gethgpath()
2443 2444 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2444 2445 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2445 2446 ' (expected %s)\n'
2446 2447 % (verb, actualhg, expecthg))
2447 2448 def _gethgpath(self):
2448 2449 """Return the path to the mercurial package that is actually found by
2449 2450 the current Python interpreter."""
2450 2451 if self._hgpath is not None:
2451 2452 return self._hgpath
2452 2453
2453 2454 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2454 2455 cmd = cmd % PYTHON
2455 2456 if PYTHON3:
2456 2457 cmd = _strpath(cmd)
2457 2458 pipe = os.popen(cmd)
2458 2459 try:
2459 2460 self._hgpath = _bytespath(pipe.read().strip())
2460 2461 finally:
2461 2462 pipe.close()
2462 2463
2463 2464 return self._hgpath
2464 2465
2465 2466 def _installchg(self):
2466 2467 """Install chg into the test environment"""
2467 2468 vlog('# Performing temporary installation of CHG')
2468 2469 assert os.path.dirname(self._bindir) == self._installdir
2469 2470 assert self._hgroot, 'must be called after _installhg()'
2470 2471 cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"'
2471 2472 % {b'make': 'make', # TODO: switch by option or environment?
2472 2473 b'prefix': self._installdir})
2473 2474 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
2474 2475 vlog("# Running", cmd)
2475 2476 proc = subprocess.Popen(cmd, shell=True, cwd=cwd,
2476 2477 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
2477 2478 stderr=subprocess.STDOUT)
2478 2479 out, _err = proc.communicate()
2479 2480 if proc.returncode != 0:
2480 2481 if PYTHON3:
2481 2482 sys.stdout.buffer.write(out)
2482 2483 else:
2483 2484 sys.stdout.write(out)
2484 2485 sys.exit(1)
2485 2486
2486 2487 def _outputcoverage(self):
2487 2488 """Produce code coverage output."""
2488 2489 from coverage import coverage
2489 2490
2490 2491 vlog('# Producing coverage report')
2491 2492 # chdir is the easiest way to get short, relative paths in the
2492 2493 # output.
2493 2494 os.chdir(self._hgroot)
2494 2495 covdir = os.path.join(self._installdir, '..', 'coverage')
2495 2496 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2496 2497
2497 2498 # Map install directory paths back to source directory.
2498 2499 cov.config.paths['srcdir'] = ['.', self._pythondir]
2499 2500
2500 2501 cov.combine()
2501 2502
2502 2503 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2503 2504 cov.report(ignore_errors=True, omit=omit)
2504 2505
2505 2506 if self.options.htmlcov:
2506 2507 htmldir = os.path.join(self._testdir, 'htmlcov')
2507 2508 cov.html_report(directory=htmldir, omit=omit)
2508 2509 if self.options.annotate:
2509 2510 adir = os.path.join(self._testdir, 'annotated')
2510 2511 if not os.path.isdir(adir):
2511 2512 os.mkdir(adir)
2512 2513 cov.annotate(directory=adir, omit=omit)
2513 2514
2514 2515 def _findprogram(self, program):
2515 2516 """Search PATH for a executable program"""
2516 2517 dpb = _bytespath(os.defpath)
2517 2518 sepb = _bytespath(os.pathsep)
2518 2519 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2519 2520 name = os.path.join(p, program)
2520 2521 if os.name == 'nt' or os.access(name, os.X_OK):
2521 2522 return name
2522 2523 return None
2523 2524
2524 2525 def _checktools(self):
2525 2526 """Ensure tools required to run tests are present."""
2526 2527 for p in self.REQUIREDTOOLS:
2527 2528 if os.name == 'nt' and not p.endswith('.exe'):
2528 2529 p += '.exe'
2529 2530 found = self._findprogram(p)
2530 2531 if found:
2531 2532 vlog("# Found prerequisite", p, "at", found)
2532 2533 else:
2533 2534 print("WARNING: Did not find prerequisite tool: %s " %
2534 2535 p.decode("utf-8"))
2535 2536
2536 2537 if __name__ == '__main__':
2537 2538 runner = TestRunner()
2538 2539
2539 2540 try:
2540 2541 import msvcrt
2541 2542 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2542 2543 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2543 2544 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2544 2545 except ImportError:
2545 2546 pass
2546 2547
2547 2548 sys.exit(runner.run(sys.argv[1:]))
@@ -1,850 +1,850 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test with install
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 Define a helper to avoid the install step
18 18 =============
19 19 $ rt()
20 20 > {
21 21 > run-tests.py --with-hg=`which hg` "$@"
22 22 > }
23 23
24 24 error paths
25 25
26 26 #if symlink
27 27 $ ln -s `which true` hg
28 28 $ run-tests.py --with-hg=./hg
29 29 warning: --with-hg should specify an hg script
30 30
31 31 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
32 32 $ rm hg
33 33 #endif
34 34
35 35 #if execbit
36 36 $ touch hg
37 37 $ run-tests.py --with-hg=./hg
38 38 Usage: run-tests.py [options] [tests]
39 39
40 40 run-tests.py: error: --with-hg must specify an executable hg script
41 41 [2]
42 42 $ rm hg
43 43 #endif
44 44
45 45 an empty test
46 46 =======================
47 47
48 48 $ touch test-empty.t
49 49 $ rt
50 50 .
51 51 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
52 52 $ rm test-empty.t
53 53
54 54 a successful test
55 55 =======================
56 56
57 57 $ cat > test-success.t << EOF
58 58 > $ echo babar
59 59 > babar
60 60 > $ echo xyzzy
61 61 > never*happens (glob) (?)
62 62 > xyzzy
63 63 > nor this (?)
64 64 > $ printf 'abc\ndef\nxyz\n'
65 65 > 123 (?)
66 66 > abc
67 67 > def (?)
68 68 > 456 (?)
69 69 > xyz
70 70 > EOF
71 71
72 72 $ rt
73 73 .
74 74 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
75 75
76 76 failing test
77 77 ==================
78 78
79 79 test churn with globs
80 80 $ cat > test-failure.t <<EOF
81 81 > $ echo "bar-baz"; echo "bar-bad"
82 82 > bar*bad (glob)
83 83 > bar*baz (glob)
84 84 > EOF
85 85 $ rt test-failure.t
86 86
87 87 --- $TESTTMP/test-failure.t
88 88 +++ $TESTTMP/test-failure.t.err
89 89 @@ -1,3 +1,3 @@
90 90 $ echo "bar-baz"; echo "bar-bad"
91 91 + bar*baz (glob)
92 92 bar*bad (glob)
93 93 - bar*baz (glob)
94 94
95 95 ERROR: test-failure.t output changed
96 96 !
97 97 Failed test-failure.t: output changed
98 98 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
99 99 python hash seed: * (glob)
100 100 [1]
101 101
102 102 basic failing test
103 103 $ cat > test-failure.t << EOF
104 104 > $ echo babar
105 105 > rataxes
106 106 > This is a noop statement so that
107 107 > this test is still more bytes than success.
108 108 > pad pad pad pad............................................................
109 109 > pad pad pad pad............................................................
110 110 > EOF
111 111
112 112 >>> fh = open('test-failure-unicode.t', 'wb')
113 113 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
114 114 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
115 115
116 116 $ rt
117 117
118 118 --- $TESTTMP/test-failure.t
119 119 +++ $TESTTMP/test-failure.t.err
120 120 @@ -1,5 +1,5 @@
121 121 $ echo babar
122 122 - rataxes
123 123 + babar
124 124 This is a noop statement so that
125 125 this test is still more bytes than success.
126 126 pad pad pad pad............................................................
127 127
128 128 ERROR: test-failure.t output changed
129 129 !.
130 130 --- $TESTTMP/test-failure-unicode.t
131 131 +++ $TESTTMP/test-failure-unicode.t.err
132 132 @@ -1,2 +1,2 @@
133 133 $ echo babar\xce\xb1 (esc)
134 134 - l\xce\xb5\xce\xb5t (esc)
135 135 + babar\xce\xb1 (esc)
136 136
137 137 ERROR: test-failure-unicode.t output changed
138 138 !
139 139 Failed test-failure.t: output changed
140 140 Failed test-failure-unicode.t: output changed
141 141 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
142 142 python hash seed: * (glob)
143 143 [1]
144 144
145 145 test --xunit support
146 146 $ rt --xunit=xunit.xml
147 147
148 148 --- $TESTTMP/test-failure.t
149 149 +++ $TESTTMP/test-failure.t.err
150 150 @@ -1,5 +1,5 @@
151 151 $ echo babar
152 152 - rataxes
153 153 + babar
154 154 This is a noop statement so that
155 155 this test is still more bytes than success.
156 156 pad pad pad pad............................................................
157 157
158 158 ERROR: test-failure.t output changed
159 159 !.
160 160 --- $TESTTMP/test-failure-unicode.t
161 161 +++ $TESTTMP/test-failure-unicode.t.err
162 162 @@ -1,2 +1,2 @@
163 163 $ echo babar\xce\xb1 (esc)
164 164 - l\xce\xb5\xce\xb5t (esc)
165 165 + babar\xce\xb1 (esc)
166 166
167 167 ERROR: test-failure-unicode.t output changed
168 168 !
169 169 Failed test-failure.t: output changed
170 170 Failed test-failure-unicode.t: output changed
171 171 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
172 172 python hash seed: * (glob)
173 173 [1]
174 174 $ cat xunit.xml
175 175 <?xml version="1.0" encoding="utf-8"?>
176 176 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
177 177 <testcase name="test-success.t" time="*"/> (glob)
178 178 <testcase name="test-failure-unicode.t" time="*"> (glob)
179 179 <![CDATA[--- $TESTTMP/test-failure-unicode.t
180 180 +++ $TESTTMP/test-failure-unicode.t.err
181 181 @@ -1,2 +1,2 @@
182 182 $ echo babar\xce\xb1 (esc)
183 183 - l\xce\xb5\xce\xb5t (esc)
184 184 + babar\xce\xb1 (esc)
185 185 ]]> </testcase>
186 186 <testcase name="test-failure.t" time="*"> (glob)
187 187 <![CDATA[--- $TESTTMP/test-failure.t
188 188 +++ $TESTTMP/test-failure.t.err
189 189 @@ -1,5 +1,5 @@
190 190 $ echo babar
191 191 - rataxes
192 192 + babar
193 193 This is a noop statement so that
194 194 this test is still more bytes than success.
195 195 pad pad pad pad............................................................
196 196 ]]> </testcase>
197 197 </testsuite>
198 198
199 199 $ rm test-failure-unicode.t
200 200
201 201 test for --retest
202 202 ====================
203 203
204 204 $ rt --retest
205 205
206 206 --- $TESTTMP/test-failure.t
207 207 +++ $TESTTMP/test-failure.t.err
208 208 @@ -1,5 +1,5 @@
209 209 $ echo babar
210 210 - rataxes
211 211 + babar
212 212 This is a noop statement so that
213 213 this test is still more bytes than success.
214 214 pad pad pad pad............................................................
215 215
216 216 ERROR: test-failure.t output changed
217 217 !
218 218 Failed test-failure.t: output changed
219 219 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
220 220 python hash seed: * (glob)
221 221 [1]
222 222
223 223 Selecting Tests To Run
224 224 ======================
225 225
226 226 successful
227 227
228 228 $ rt test-success.t
229 229 .
230 230 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
231 231
232 232 success w/ keyword
233 233 $ rt -k xyzzy
234 234 .
235 235 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
236 236
237 237 failed
238 238
239 239 $ rt test-failure.t
240 240
241 241 --- $TESTTMP/test-failure.t
242 242 +++ $TESTTMP/test-failure.t.err
243 243 @@ -1,5 +1,5 @@
244 244 $ echo babar
245 245 - rataxes
246 246 + babar
247 247 This is a noop statement so that
248 248 this test is still more bytes than success.
249 249 pad pad pad pad............................................................
250 250
251 251 ERROR: test-failure.t output changed
252 252 !
253 253 Failed test-failure.t: output changed
254 254 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
255 255 python hash seed: * (glob)
256 256 [1]
257 257
258 258 failure w/ keyword
259 259 $ rt -k rataxes
260 260
261 261 --- $TESTTMP/test-failure.t
262 262 +++ $TESTTMP/test-failure.t.err
263 263 @@ -1,5 +1,5 @@
264 264 $ echo babar
265 265 - rataxes
266 266 + babar
267 267 This is a noop statement so that
268 268 this test is still more bytes than success.
269 269 pad pad pad pad............................................................
270 270
271 271 ERROR: test-failure.t output changed
272 272 !
273 273 Failed test-failure.t: output changed
274 274 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
275 275 python hash seed: * (glob)
276 276 [1]
277 277
278 278 Verify that when a process fails to start we show a useful message
279 279 ==================================================================
280 280
281 281 $ cat > test-serve-fail.t <<EOF
282 282 > $ echo 'abort: child process failed to start blah'
283 283 > EOF
284 284 $ rt test-serve-fail.t
285 285
286 286 ERROR: test-serve-fail.t output changed
287 287 !
288 288 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
289 289 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
290 290 python hash seed: * (glob)
291 291 [1]
292 292 $ rm test-serve-fail.t
293 293
294 294 Verify that we can try other ports
295 295 ===================================
296 296 $ hg init inuse
297 297 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
298 298 $ cat blocks.pid >> $DAEMON_PIDS
299 299 $ cat > test-serve-inuse.t <<EOF
300 300 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
301 301 > $ cat hg.pid >> \$DAEMON_PIDS
302 302 > EOF
303 303 $ rt test-serve-inuse.t
304 304 .
305 305 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
306 306 $ rm test-serve-inuse.t
307 307
308 308 Running In Debug Mode
309 309 ======================
310 310
311 311 $ rt --debug 2>&1 | grep -v pwd
312 312 + echo *SALT* 0 0 (glob)
313 313 *SALT* 0 0 (glob)
314 314 + echo babar
315 315 babar
316 316 + echo *SALT* 6 0 (glob)
317 317 *SALT* 6 0 (glob)
318 318 *+ echo *SALT* 0 0 (glob)
319 319 *SALT* 0 0 (glob)
320 320 + echo babar
321 321 babar
322 322 + echo *SALT* 2 0 (glob)
323 323 *SALT* 2 0 (glob)
324 324 + echo xyzzy
325 325 xyzzy
326 326 + echo *SALT* 6 0 (glob)
327 327 *SALT* 6 0 (glob)
328 328 + printf *abc\ndef\nxyz\n* (glob)
329 329 abc
330 330 def
331 331 xyz
332 332 + echo *SALT* 12 0 (glob)
333 333 *SALT* 12 0 (glob)
334 334 .
335 335 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
336 336
337 337 Parallel runs
338 338 ==============
339 339
340 340 (duplicate the failing test to get predictable output)
341 341 $ cp test-failure.t test-failure-copy.t
342 342
343 343 $ rt --jobs 2 test-failure*.t -n
344 344 !!
345 345 Failed test-failure*.t: output changed (glob)
346 346 Failed test-failure*.t: output changed (glob)
347 347 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
348 348 python hash seed: * (glob)
349 349 [1]
350 350
351 351 failures in parallel with --first should only print one failure
352 352 >>> f = open('test-nothing.t', 'w')
353 353 >>> f.write('foo\n' * 1024) and None
354 354 >>> f.write(' $ sleep 1') and None
355 355 $ rt --jobs 2 --first
356 356
357 357 --- $TESTTMP/test-failure*.t (glob)
358 358 +++ $TESTTMP/test-failure*.t.err (glob)
359 359 @@ -1,5 +1,5 @@
360 360 $ echo babar
361 361 - rataxes
362 362 + babar
363 363 This is a noop statement so that
364 364 this test is still more bytes than success.
365 365 pad pad pad pad............................................................
366 366
367 367 Failed test-failure*.t: output changed (glob)
368 368 Failed test-nothing.t: output changed
369 369 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
370 370 python hash seed: * (glob)
371 371 [1]
372 372
373 373
374 374 (delete the duplicated test file)
375 375 $ rm test-failure-copy.t test-nothing.t
376 376
377 377
378 378 Interactive run
379 379 ===============
380 380
381 381 (backup the failing test)
382 382 $ cp test-failure.t backup
383 383
384 384 Refuse the fix
385 385
386 386 $ echo 'n' | rt -i
387 387
388 388 --- $TESTTMP/test-failure.t
389 389 +++ $TESTTMP/test-failure.t.err
390 390 @@ -1,5 +1,5 @@
391 391 $ echo babar
392 392 - rataxes
393 393 + babar
394 394 This is a noop statement so that
395 395 this test is still more bytes than success.
396 396 pad pad pad pad............................................................
397 397 Accept this change? [n]
398 398 ERROR: test-failure.t output changed
399 399 !.
400 400 Failed test-failure.t: output changed
401 401 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
402 402 python hash seed: * (glob)
403 403 [1]
404 404
405 405 $ cat test-failure.t
406 406 $ echo babar
407 407 rataxes
408 408 This is a noop statement so that
409 409 this test is still more bytes than success.
410 410 pad pad pad pad............................................................
411 411 pad pad pad pad............................................................
412 412
413 413 Interactive with custom view
414 414
415 415 $ echo 'n' | rt -i --view echo
416 416 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
417 417 Accept this change? [n]* (glob)
418 418 ERROR: test-failure.t output changed
419 419 !.
420 420 Failed test-failure.t: output changed
421 421 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
422 422 python hash seed: * (glob)
423 423 [1]
424 424
425 425 View the fix
426 426
427 427 $ echo 'y' | rt --view echo
428 428 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
429 429
430 430 ERROR: test-failure.t output changed
431 431 !.
432 432 Failed test-failure.t: output changed
433 433 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
434 434 python hash seed: * (glob)
435 435 [1]
436 436
437 437 Accept the fix
438 438
439 439 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
440 440 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
441 441 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
442 442 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
443 443 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
444 444 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
445 445 $ echo 'y' | rt -i 2>&1
446 446
447 447 --- $TESTTMP/test-failure.t
448 448 +++ $TESTTMP/test-failure.t.err
449 449 @@ -1,11 +1,11 @@
450 450 $ echo babar
451 451 - rataxes
452 452 + babar
453 453 This is a noop statement so that
454 454 this test is still more bytes than success.
455 455 pad pad pad pad............................................................
456 456 pad pad pad pad............................................................
457 457 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
458 458 - saved backup bundle to $TESTTMP/foo.hg
459 459 + saved backup bundle to $TESTTMP/foo.hg* (glob)
460 460 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
461 461 saved backup bundle to $TESTTMP/foo.hg* (glob)
462 462 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
463 463 Accept this change? [n] ..
464 464 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
465 465
466 466 $ sed -e 's,(glob)$,&<,g' test-failure.t
467 467 $ echo babar
468 468 babar
469 469 This is a noop statement so that
470 470 this test is still more bytes than success.
471 471 pad pad pad pad............................................................
472 472 pad pad pad pad............................................................
473 473 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
474 474 saved backup bundle to $TESTTMP/foo.hg (glob)<
475 475 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
476 476 saved backup bundle to $TESTTMP/foo.hg (glob)<
477 477 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
478 478 saved backup bundle to $TESTTMP/*.hg (glob)<
479 479
480 480 (reinstall)
481 481 $ mv backup test-failure.t
482 482
483 483 No Diff
484 484 ===============
485 485
486 486 $ rt --nodiff
487 487 !.
488 488 Failed test-failure.t: output changed
489 489 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
490 490 python hash seed: * (glob)
491 491 [1]
492 492
493 493 test --tmpdir support
494 494 $ rt --tmpdir=$TESTTMP/keep test-success.t
495 495
496 496 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
497 497 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
498 498 .
499 499 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
500 500
501 501 timeouts
502 502 ========
503 503 $ cat > test-timeout.t <<EOF
504 504 > $ sleep 2
505 505 > $ echo pass
506 506 > pass
507 507 > EOF
508 508 > echo '#require slow' > test-slow-timeout.t
509 509 > cat test-timeout.t >> test-slow-timeout.t
510 510 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
511 511 st
512 512 Skipped test-slow-timeout.t: missing feature: allow slow tests
513 513 Failed test-timeout.t: timed out
514 514 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
515 515 python hash seed: * (glob)
516 516 [1]
517 517 $ rt --timeout=1 --slowtimeout=3 \
518 518 > test-timeout.t test-slow-timeout.t --allow-slow-tests
519 519 .t
520 520 Failed test-timeout.t: timed out
521 521 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
522 522 python hash seed: * (glob)
523 523 [1]
524 524 $ rm test-timeout.t test-slow-timeout.t
525 525
526 526 test for --time
527 527 ==================
528 528
529 529 $ rt test-success.t --time
530 530 .
531 531 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
532 532 # Producing time report
533 533 start end cuser csys real Test
534 534 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
535 535
536 536 test for --time with --job enabled
537 537 ====================================
538 538
539 539 $ rt test-success.t --time --jobs 2
540 540 .
541 541 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
542 542 # Producing time report
543 543 start end cuser csys real Test
544 544 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
545 545
546 546 Skips
547 547 ================
548 548 $ cat > test-skip.t <<EOF
549 549 > $ echo xyzzy
550 550 > #require false
551 551 > EOF
552 552 $ rt --nodiff
553 553 !.s
554 554 Skipped test-skip.t: missing feature: nail clipper
555 555 Failed test-failure.t: output changed
556 556 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
557 557 python hash seed: * (glob)
558 558 [1]
559 559
560 560 $ rt --keyword xyzzy
561 561 .s
562 562 Skipped test-skip.t: missing feature: nail clipper
563 563 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
564 564
565 565 Skips with xml
566 566 $ rt --keyword xyzzy \
567 567 > --xunit=xunit.xml
568 568 .s
569 569 Skipped test-skip.t: missing feature: nail clipper
570 570 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
571 571 $ cat xunit.xml
572 572 <?xml version="1.0" encoding="utf-8"?>
573 573 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
574 574 <testcase name="test-success.t" time="*"/> (glob)
575 575 </testsuite>
576 576
577 577 Missing skips or blacklisted skips don't count as executed:
578 578 $ echo test-failure.t > blacklist
579 579 $ rt --blacklist=blacklist --json\
580 580 > test-failure.t test-bogus.t
581 581 ss
582 582 Skipped test-bogus.t: Doesn't exist
583 583 Skipped test-failure.t: blacklisted
584 584 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
585 585 $ cat report.json
586 586 testreport ={
587 587 "test-bogus.t": {
588 588 "result": "skip"
589 },
589 },
590 590 "test-failure.t": {
591 591 "result": "skip"
592 592 }
593 593 } (no-eol)
594 594
595 595 Whitelist trumps blacklist
596 596 $ echo test-failure.t > whitelist
597 597 $ rt --blacklist=blacklist --whitelist=whitelist --json\
598 598 > test-failure.t test-bogus.t
599 599 s
600 600 --- $TESTTMP/test-failure.t
601 601 +++ $TESTTMP/test-failure.t.err
602 602 @@ -1,5 +1,5 @@
603 603 $ echo babar
604 604 - rataxes
605 605 + babar
606 606 This is a noop statement so that
607 607 this test is still more bytes than success.
608 608 pad pad pad pad............................................................
609 609
610 610 ERROR: test-failure.t output changed
611 611 !
612 612 Skipped test-bogus.t: Doesn't exist
613 613 Failed test-failure.t: output changed
614 614 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
615 615 python hash seed: * (glob)
616 616 [1]
617 617
618 618 test for --json
619 619 ==================
620 620
621 621 $ rt --json
622 622
623 623 --- $TESTTMP/test-failure.t
624 624 +++ $TESTTMP/test-failure.t.err
625 625 @@ -1,5 +1,5 @@
626 626 $ echo babar
627 627 - rataxes
628 628 + babar
629 629 This is a noop statement so that
630 630 this test is still more bytes than success.
631 631 pad pad pad pad............................................................
632 632
633 633 ERROR: test-failure.t output changed
634 634 !.s
635 635 Skipped test-skip.t: missing feature: nail clipper
636 636 Failed test-failure.t: output changed
637 637 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
638 638 python hash seed: * (glob)
639 639 [1]
640 640
641 641 $ cat report.json
642 642 testreport ={
643 643 "test-failure.t": [\{] (re)
644 644 "csys": "\s*[\d\.]{4,5}", ? (re)
645 645 "cuser": "\s*[\d\.]{4,5}", ? (re)
646 646 "diff": "---.+\+\+\+.+", ? (re)
647 647 "end": "\s*[\d\.]{4,5}", ? (re)
648 648 "result": "failure", ? (re)
649 649 "start": "\s*[\d\.]{4,5}", ? (re)
650 650 "time": "\s*[\d\.]{4,5}" (re)
651 651 }, ? (re)
652 652 "test-skip.t": {
653 653 "csys": "\s*[\d\.]{4,5}", ? (re)
654 654 "cuser": "\s*[\d\.]{4,5}", ? (re)
655 655 "diff": "", ? (re)
656 656 "end": "\s*[\d\.]{4,5}", ? (re)
657 657 "result": "skip", ? (re)
658 658 "start": "\s*[\d\.]{4,5}", ? (re)
659 659 "time": "\s*[\d\.]{4,5}" (re)
660 660 }, ? (re)
661 661 "test-success.t": [\{] (re)
662 662 "csys": "\s*[\d\.]{4,5}", ? (re)
663 663 "cuser": "\s*[\d\.]{4,5}", ? (re)
664 664 "diff": "", ? (re)
665 665 "end": "\s*[\d\.]{4,5}", ? (re)
666 666 "result": "success", ? (re)
667 667 "start": "\s*[\d\.]{4,5}", ? (re)
668 668 "time": "\s*[\d\.]{4,5}" (re)
669 669 }
670 670 } (no-eol)
671 671
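The '? (re)' after each comma above exists because json.dumps emits a
trailing space after commas on some Python versions when indent= is used and
not on others (Python 3.4 changed the default separators). Passing explicit
separators makes the report identical everywhere; an illustrative snippet:

  import json

  data = {'test-success.t': {'result': 'success', 'time': '0.1'}}

  # Older Pythons default to a ', ' item separator even with indent=,
  # leaving a space before each newline; newer ones drop it.  Explicit
  # separators sidestep the divergence that the test patterns tolerate.
  print(json.dumps(data, sort_keys=True, indent=4,
                   separators=(',', ': ')))
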
672 672 Test that failed tests accepted through interactive mode are properly reported:
673 673
674 674 $ cp test-failure.t backup
675 675 $ echo y | rt --json -i
676 676
677 677 --- $TESTTMP/test-failure.t
678 678 +++ $TESTTMP/test-failure.t.err
679 679 @@ -1,5 +1,5 @@
680 680 $ echo babar
681 681 - rataxes
682 682 + babar
683 683 This is a noop statement so that
684 684 this test is still more bytes than success.
685 685 pad pad pad pad............................................................
686 686 Accept this change? [n] ..s
687 687 Skipped test-skip.t: missing feature: nail clipper
688 688 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
689 689
690 690 $ cat report.json
691 691 testreport ={
692 692 "test-failure.t": [\{] (re)
693 693 "csys": "\s*[\d\.]{4,5}", ? (re)
694 694 "cuser": "\s*[\d\.]{4,5}", ? (re)
695 695 "diff": "", ? (re)
696 696 "end": "\s*[\d\.]{4,5}", ? (re)
697 697 "result": "success", ? (re)
698 698 "start": "\s*[\d\.]{4,5}", ? (re)
699 699 "time": "\s*[\d\.]{4,5}" (re)
700 700 }, ? (re)
701 701 "test-skip.t": {
702 702 "csys": "\s*[\d\.]{4,5}", ? (re)
703 703 "cuser": "\s*[\d\.]{4,5}", ? (re)
704 704 "diff": "", ? (re)
705 705 "end": "\s*[\d\.]{4,5}", ? (re)
706 706 "result": "skip", ? (re)
707 707 "start": "\s*[\d\.]{4,5}", ? (re)
708 708 "time": "\s*[\d\.]{4,5}" (re)
709 709 }, ? (re)
710 710 "test-success.t": [\{] (re)
711 711 "csys": "\s*[\d\.]{4,5}", ? (re)
712 712 "cuser": "\s*[\d\.]{4,5}", ? (re)
713 713 "diff": "", ? (re)
714 714 "end": "\s*[\d\.]{4,5}", ? (re)
715 715 "result": "success", ? (re)
716 716 "start": "\s*[\d\.]{4,5}", ? (re)
717 717 "time": "\s*[\d\.]{4,5}" (re)
718 718 }
719 719 } (no-eol)
720 720 $ mv backup test-failure.t
721 721
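Accepting the diff interactively rewrites the reference file from the
generated .err output and records the run as a success, which is why the
report above shows an empty diff for test-failure.t. A sketch of that idea
(not the harness's actual code):

  import shutil

  def accept_output(testpath, answer):
      # testpath + '.err' holds what the test actually printed; on 'y'
      # copy it over the reference .t file and drop the failure.
      if answer.strip().lower().startswith('y'):
          shutil.copyfile(testpath + '.err', testpath)
          return 'success'
      return 'failure'
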
722 722 backslash at the end of a line with glob matching is handled properly
723 723
724 724 $ cat > test-glob-backslash.t << EOF
725 725 > $ echo 'foo bar \\'
726 726 > foo * \ (glob)
727 727 > EOF
728 728
729 729 $ rt test-glob-backslash.t
730 730 .
731 731 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
732 732
733 733 $ rm -f test-glob-backslash.t
734 734
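Glob lines are translated into regular expressions, so a literal backslash at
the end of the expected output has to survive that translation. A simplified,
hypothetical translator showing the idea:

  import re

  def glob_to_re(pattern):
      # Only '*' and '?' are treated as wildcards here; everything else,
      # including a trailing backslash, is escaped and matched literally.
      parts = []
      for ch in pattern:
          if ch == '*':
              parts.append('.*')
          elif ch == '?':
              parts.append('.')
          else:
              parts.append(re.escape(ch))
      return ''.join(parts) + '$'

  assert re.match(glob_to_re('foo * \\'), 'foo bar \\')
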
735 735 Test reusability for third party tools
736 736 ======================================
737 737
738 738 $ mkdir "$TESTTMP"/anothertests
739 739 $ cd "$TESTTMP"/anothertests
740 740
741 741 test that `run-tests.py` can execute hghave, even when it is not run from
742 742 the Mercurial source tree.
743 743
744 744 $ cat > test-hghave.t <<EOF
745 745 > #require true
746 746 > $ echo foo
747 747 > foo
748 748 > EOF
749 749 $ rt test-hghave.t
750 750 .
751 751 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
752 752
753 753 test that RUNTESTDIR refers to the directory in which the currently
754 754 running `run-tests.py` is located.
755 755
756 756 $ cat > test-runtestdir.t <<EOF
757 757 > - $TESTDIR, in which test-run-tests.t is placed
758 758 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
759 759 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
760 760 >
761 761 > #if windows
762 762 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
763 763 > #else
764 764 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
765 765 > #endif
766 766 > $ test "\$RUNTESTDIR" = "$TESTDIR"
767 767 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
768 768 > #!/usr/bin/env python
769 769 > #
770 770 > # check-code - a style and portability checker for Mercurial
771 771 > EOF
772 772 $ rt test-runtestdir.t
773 773 .
774 774 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
775 775
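Both variables are simply exported into the environment of the shell that
runs the test body; conceptually something like the following sketch, where
deriving the two paths is the only logic:

  import os

  def test_environment(testfile, runtests_script):
      env = os.environ.copy()
      # TESTDIR: directory containing the .t file being run;
      # RUNTESTDIR: directory containing run-tests.py itself.
      env['TESTDIR'] = os.path.dirname(os.path.abspath(testfile))
      env['RUNTESTDIR'] = os.path.dirname(os.path.abspath(runtests_script))
      return env
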
776 776 #if execbit
777 777
778 778 test that TESTDIR is included in PATH
779 779
780 780 $ cat > custom-command.sh <<EOF
781 781 > #!/bin/sh
782 782 > echo "hello world"
783 783 > EOF
784 784 $ chmod +x custom-command.sh
785 785 $ cat > test-testdir-path.t <<EOF
786 786 > $ custom-command.sh
787 787 > hello world
788 788 > EOF
789 789 $ rt test-testdir-path.t
790 790 .
791 791 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
792 792
793 793 #endif
794 794
795 795 test support for --allow-slow-tests
796 796 $ cat > test-very-slow-test.t <<EOF
797 797 > #require slow
798 798 > $ echo pass
799 799 > pass
800 800 > EOF
801 801 $ rt test-very-slow-test.t
802 802 s
803 803 Skipped test-very-slow-test.t: missing feature: allow slow tests
804 804 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
805 805 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
806 806 .
807 807 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
808 808
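The 'slow' feature is just another hghave check, and --allow-slow-tests
presumably reaches it through the environment. A hypothetical sketch (the
variable name HGTEST_SLOW is an assumption for illustration):

  import os

  def has_slow():
      # hghave-style predicate gating '#require slow'; run-tests would set
      # the variable when --allow-slow-tests is passed (name assumed).
      return os.environ.get('HGTEST_SLOW') == 'slow'
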
809 809 support for running a test outside the current directory
810 810 $ mkdir nonlocal
811 811 $ cat > nonlocal/test-is-not-here.t << EOF
812 812 > $ echo pass
813 813 > pass
814 814 > EOF
815 815 $ rt nonlocal/test-is-not-here.t
816 816 .
817 817 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
818 818
819 819 support for bisecting failed tests automatically
820 820 $ hg init bisect
821 821 $ cd bisect
822 822 $ cat >> test-bisect.t <<EOF
823 823 > $ echo pass
824 824 > pass
825 825 > EOF
826 826 $ hg add test-bisect.t
827 827 $ hg ci -m 'good'
828 828 $ cat >> test-bisect.t <<EOF
829 829 > $ echo pass
830 830 > fail
831 831 > EOF
832 832 $ hg ci -m 'bad'
833 833 $ rt --known-good-rev=0 test-bisect.t
834 834
835 835 --- $TESTTMP/anothertests/bisect/test-bisect.t
836 836 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
837 837 @@ -1,4 +1,4 @@
838 838 $ echo pass
839 839 pass
840 840 $ echo pass
841 841 - fail
842 842 + pass
843 843
844 844 ERROR: test-bisect.t output changed
845 845 !
846 846 Failed test-bisect.t: output changed
847 847 test-bisect.t broken by 72cbf122d116 (bad)
848 848 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
849 849 python hash seed: * (glob)
850 850 [1]
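With --known-good-rev the harness can lean on 'hg bisect --command',
re-running the single failing test at each candidate revision and reporting
the first bad changeset, as in the '72cbf122d116 (bad)' line above. A rough
sketch of that idea (the real implementation also parses hg's output):

  import subprocess
  import sys

  def bisect_test(test, known_good_rev):
      # Mark the working parent bad and the known revision good, then let
      # 'hg bisect --command' drive the search by re-running the test.
      subprocess.check_call(['hg', 'bisect', '--reset'])
      subprocess.check_call(['hg', 'bisect', '--bad', '.'])
      subprocess.check_call(['hg', 'bisect', '--good', known_good_rev])
      subprocess.call(['hg', 'bisect', '--command',
                       '%s run-tests.py %s' % (sys.executable, test)])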