##// END OF EJS Templates
run-tests: wait for test threads after first error...
Gregory Szorc -
r24507:a0668a58 default
parent child Browse files
Show More
@@ -1,2056 +1,2071 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 #
39 39 # (You could use any subset of the tests: test-s* happens to match
40 40 # enough that it's worth doing parallel runs, few enough that it
41 41 # completes fairly quickly, includes both shell and Python scripts, and
42 42 # includes some scripts that run daemon processes.)
43 43
44 44 from distutils import version
45 45 import difflib
46 46 import errno
47 47 import optparse
48 48 import os
49 49 import shutil
50 50 import subprocess
51 51 import signal
52 52 import sys
53 53 import tempfile
54 54 import time
55 55 import random
56 56 import re
57 57 import threading
58 58 import killdaemons as killmod
59 59 import Queue as queue
60 60 from xml.dom import minidom
61 61 import unittest
62 62
# json entered the stdlib in Python 2.6; fall back to the external
# simplejson package on older interpreters, or record its absence so
# json-dependent features (e.g. --json) can be disabled gracefully.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        json = None
70 70
# Serializes Popen creation across runner threads (see Popen4 below).
processlock = threading.Lock()

# subprocess._cleanup can race with any Popen.wait or Popen.poll on py24
# http://bugs.python.org/issue1731717 for details. We shouldn't be producing
# zombies but it's pretty harmless even if we do.
if sys.version_info < (2, 5):
    subprocess._cleanup = lambda: None
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn cmd through the shell in working directory wd.

    The returned Popen object carries popen2-style aliases
    (fromchild/tochild/childerr) and a boolean ``timeout`` attribute.
    When a nonzero timeout is given, a watchdog thread polls the process
    and, if it is still running once the deadline passes, sets
    ``timeout`` and terminates it.
    """
    processlock.acquire()
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    processlock.release()

    # popen2-compatible aliases for older call sites.
    proc.fromchild = proc.stdout
    proc.tochild = proc.stdin
    proc.childerr = proc.stderr

    proc.timeout = False
    if timeout:
        def watchdog():
            deadline = time.time() + timeout
            while proc.returncode is None and time.time() < deadline:
                time.sleep(.1)
            proc.timeout = True
            if proc.returncode is None:
                terminate(proc)
        threading.Thread(target=watchdog).start()

    return proc
104 104
# Interpreter used to spawn child Python processes; forward slashes keep
# the path safe inside double-quoted shell commands on Windows.
PYTHON = sys.executable.replace('\\', '/')
# Environment variable naming the import path for the running
# implementation (Jython reads JYTHONPATH instead of PYTHONPATH).
IMPL_PATH = 'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = 'JYTHONPATH'

# Per-option (environment variable, fallback) pairs; getparser() resolves
# each entry against the environment and installs it as a parser default.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
116 116
117 117 def parselistfiles(files, listtype, warn=True):
118 118 entries = dict()
119 119 for filename in files:
120 120 try:
121 121 path = os.path.expanduser(os.path.expandvars(filename))
122 122 f = open(path, "rb")
123 123 except IOError, err:
124 124 if err.errno != errno.ENOENT:
125 125 raise
126 126 if warn:
127 127 print "warning: no such %s file: %s" % (listtype, filename)
128 128 continue
129 129
130 130 for line in f.readlines():
131 131 line = line.split('#', 1)[0].strip()
132 132 if line:
133 133 entries[line] = filename
134 134
135 135 f.close()
136 136 return entries
137 137
def getparser():
    """Obtain the OptionParser used by the CLI.

    Option defaults come from the module-level ``defaults`` table; each
    entry is resolved from its environment variable (or built-in
    fallback) at the bottom of this function before being installed on
    the parser.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')

    # Resolve each default from the environment, coercing to the type of
    # the built-in fallback, and mutate ``defaults`` in place so the rest
    # of the program sees the resolved values.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
224 224
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns the (options, args) pair from optparse.  Invalid option
    combinations abort via parser.error().  Several options are also
    normalized in place: path expansion, --local resolving to an hg
    binary, blacklist/whitelist files parsed into dicts, and the
    module-level ``verbose`` flag enabled for -v.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.expanduser(options.with_hg)
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == 'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local is sugar for --with-hg=<testdir>/../hg.
        testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
        hgbin = os.path.join(os.path.dirname(testdir), 'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # Note: '' (not True) — vlog() tests ``verbose is False`` so any
        # non-False value enables it, while log() skips the empty prefix.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        options.timeout = 0
    if options.py3k_warnings:
        if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
            parser.error('--py3k-warnings can only be used on Python 2.6+')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    return (options, args)
294 294
295 295 def rename(src, dst):
296 296 """Like os.rename(), trade atomicity and opened files friendliness
297 297 for existing destination support.
298 298 """
299 299 shutil.copy(src, dst)
300 300 os.remove(src)
301 301
def getdiff(expected, output, ref, err):
    """Produce a unified diff between expected and actual test output.

    ref and err label the two sides in the diff header.  Header lines
    ('---'/'+++') are normalized: backslashes become forward slashes and
    a trailing space before the newline is dropped.  Returns a
    (servefail, lines) pair where servefail is True when the diff shows
    a "child process failed to start" abort, i.e. a daemon/server
    startup failure rather than an ordinary output mismatch.
    """
    servefail = False
    lines = []
    for line in difflib.unified_diff(expected, output, ref, err):
        if line.startswith(('+++', '---')):
            line = line.replace('\\', '/')
            if line.endswith(' \n'):
                line = '%s\n' % line[:-2]
        lines.append(line)
        if (not servefail and
            line.startswith('+ abort: child process failed to start')):
            servefail = True

    return servefail, lines
316 316
# Verbosity marker: False disables vlog().  parseargs() replaces it with
# '' in verbose mode; vlog() tests identity against False, not truthiness.
verbose = False
def vlog(*msg):
    """Forward msg to log(), but only when verbose mode is active."""
    if verbose is not False:
        return log(*msg)
324 324
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(r"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Return data with anything that would break a CDATA block defused.

    XML forbids most control characters even inside CDATA, so each one
    is replaced with '?'.  A literal ']]>' would terminate the block
    early, so a space is inserted between the brackets and the '>'.
    """
    scrubbed = CDATA_EVIL.sub('?', data)
    return scrubbed.replace(']]>', '] ]>')
338 338
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # iolock (defined elsewhere in this module) serializes output from
    # concurrent test threads so messages don't interleave.
    iolock.acquire()
    if verbose:
        # Prefix the line with the (non-empty) verbose marker.
        print verbose,
    for m in msg:
        print m,
    print
    sys.stdout.flush()
    iolock.release()
352 352
def terminate(proc):
    """Terminate subprocess (with fallback for Python versions < 2.6)"""
    vlog('# Terminating process %d' % proc.pid)
    # Popen.terminate() only exists on 2.6+; fall back to a raw SIGTERM.
    # A process that already exited raises OSError, which we ignore.
    killer = getattr(proc, 'terminate', None)
    if killer is None:
        killer = lambda: os.kill(proc.pid, signal.SIGTERM)
    try:
        killer()
    except OSError:
        pass
360 360
def killdaemons(pidfile):
    """Kill daemon processes whose pids are listed in pidfile.

    Delegates to the killdaemons.py helper module; the tryhard/remove
    flags are forwarded as-is (see that module for their semantics).
    """
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
364 364
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """

        self.path = path
        self.name = os.path.basename(path)
        self._testdir = os.path.dirname(path)
        # Where failing output is written (<test>.err next to the test).
        self.errpath = os.path.join(self._testdir, '%s.err' % self.name)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = shell

        # Per-run state, (re)initialized in setUp()/runTest().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The per-thread tmpdir may already exist (shared across tests on
        # the same thread); only a genuine failure to create it is fatal.
        try:
            os.mkdir(self._threadtmp)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise

        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError, e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest, e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest, e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest, e:
                result.addWarn(self, str(e))
            except self.failureException, e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    # The result object accepted the new output; count
                    # the test as a success rather than a failure.
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            # tearDown always runs; a tearDown error turns a passing test
            # into an error.
            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        replacements = self._getreplacements()
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(replacements, env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a process exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        # Reap any daemons the test started before touching its tmpdirs.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if not self._keeptmpdir:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        # Persist failing output to the .err file (if runTest didn't get
        # far enough to do so itself).
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret)

    def _run(self, replacements, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            (r':%s\b' % self._startport, ':$HGPORT'),
            (r':%s\b' % (self._startport + 1), ':$HGPORT1'),
            (r':%s\b' % (self._startport + 2), ':$HGPORT2'),
            (r'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             r'\1 (glob)'),
            ]

        if os.name == 'nt':
            # Build a case-insensitive, slash-agnostic regex matching the
            # test tmp path: letters become [aA] classes, separators match
            # either slash, digits pass through, everything else is escaped.
            r.append(
                (''.join(c.isalpha() and '[%s%s]' % (c.lower(), c.upper()) or
                    c in '/\\' and r'[/\\]' or c.isdigit() and c or '\\' + c
                    for c in self._testtmp), '$TESTTMP'))
        else:
            r.append((re.escape(self._testtmp), '$TESTTMP'))

        return r

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        env["HGPORT"] = str(self._startport)
        env["HGPORT1"] = str(self._startport + 1)
        env["HGPORT2"] = str(self._startport + 2)
        env["HGRCPATH"] = os.path.join(self._threadtmp, '.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, 'daemon.pids')
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        # Drop variables that could leak user configuration into tests.
        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test.

        The file pins ui/defaults settings (fixed dates, non-interactive
        mode, a test-local largefiles cache) for repeatable output, then
        appends any --extra-config-opt entries of the form
        "section.key=value".
        """
        hgrc = open(path, 'wb')
        hgrc.write('[ui]\n')
        hgrc.write('slash = True\n')
        hgrc.write('interactive = False\n')
        hgrc.write('mergemarkers = detailed\n')
        hgrc.write('promptecho = True\n')
        hgrc.write('[defaults]\n')
        hgrc.write('backout = -d "0 0"\n')
        hgrc.write('commit = -d "0 0"\n')
        hgrc.write('shelve = --date "0 0"\n')
        hgrc.write('tag = -d "0 0"\n')
        hgrc.write('[largefiles]\n')
        hgrc.write('usercache = %s\n' %
                   (os.path.join(self._testtmp, '.cache/largefiles')))

        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write('[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
722 722
class PythonTest(Test):
    """A test implemented as a Python script (compared against a .out file)."""

    @property
    def refpath(self):
        # Expected output lives next to the test as <name>.out.
        return os.path.join(self._testdir, self.name + '.out')

    def _run(self, replacements, env):
        """Execute the test script with the test interpreter."""
        switch = ' -3' if self._py3kwarnings else ''
        cmd = '%s%s "%s"' % (PYTHON, switch, self.path)
        vlog("# Running", cmd)
        if os.name == 'nt':
            # Normalize Windows line endings in captured output.
            replacements.append((r'\r\n', '\n'))
        result = run(cmd, self._testtmp, replacements, env,
                     debug=self._debug, timeout=self._timeout)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
742 742
# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
# (Compiled once at module load.)
checkcodeglobpats = [
    re.compile(r'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(r'^moving \S+/.*[^)]$'),
    re.compile(r'^pulling from \$TESTTMP/.*[^)]$')
]
751 751
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Output prefixes consulted when interpreting hghave results (see the
    # parsehghaveoutput call in Test.runTest).
    SKIPPED_PREFIX = 'skipped: '
    FAILED_PREFIX = 'hghave check failed: '
    # Predicate: does a string contain bytes that need \xNN escaping
    # (control characters or non-ASCII)?
    NEEDESCAPE = re.compile(r'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Escaping machinery: each such byte maps to its \xNN form, except
    # backslash and carriage return which get readable escapes.
    ESCAPESUB = re.compile(r'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((chr(i), r'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({'\\': '\\\\', '\r': r'\r'})
762 762
    @property
    def refpath(self):
        # For a .t file the reference output is the test file itself:
        # expected output is interleaved with the commands (see _parsetest).
        return os.path.join(self._testdir, self.name)
766 766
767 767 def _run(self, replacements, env):
768 768 f = open(self.path, 'rb')
769 769 lines = f.readlines()
770 770 f.close()
771 771
772 772 salt, script, after, expected = self._parsetest(lines)
773 773
774 774 # Write out the generated script.
775 775 fname = '%s.sh' % self._testtmp
776 776 f = open(fname, 'wb')
777 777 for l in script:
778 778 f.write(l)
779 779 f.close()
780 780
781 781 cmd = '%s "%s"' % (self._shell, fname)
782 782 vlog("# Running", cmd)
783 783
784 784 exitcode, output = run(cmd, self._testtmp, replacements, env,
785 785 debug=self._debug, timeout=self._timeout)
786 786
787 787 if self._aborted:
788 788 raise KeyboardInterrupt()
789 789
790 790 # Do not merge output if skipped. Return hghave message instead.
791 791 # Similarly, with --debug, output is None.
792 792 if exitcode == self.SKIPPED_STATUS or output is None:
793 793 return exitcode, output
794 794
795 795 return self._processoutput(exitcode, output, salt, after, expected)
796 796
    def _hghave(self, reqs):
        """Return True if every feature named in reqs is available.

        Runs the hghave helper script in a subshell; exit code 0 means
        all requirements are met.  An exit code of 2 (presumably a
        usage/internal error in hghave itself — confirm against hghave)
        aborts the entire test run.
        """
        # TODO do something smarter when all other uses of hghave are gone.
        tdir = self._testdir.replace('\\', '/')
        proc = Popen4('%s -c "%s/hghave %s"' %
                      (self._shell, tdir, ' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print stdout
            sys.exit(1)

        return ret == 0
812 812
    def _parsetest(self, lines):
        """Compile the .t source lines into an executable shell script.

        Returns (salt, script, after, expected) where script is the list
        of shell lines to run, after maps source line numbers to the
        non-executed text to re-merge into the output, and expected maps
        source line numbers to the output lines the test expects.

        NOTE(review): the scraped copy of this file appears to have
        collapsed doubled spaces inside string literals (e.g. the ' $ '
        prefix below is sliced with l[4:], which implies a two-space
        indent '  $ ' upstream) — confirm prefixes against the canonical
        run-tests.py before relying on them.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = "SALT" + str(time.time())
        def addsalt(line, inpython):
            # Inside a Python heredoc we can't run echo, so emit the
            # marker literally (with a fixed 0 status); in shell context
            # echo it along with the last exit code.
            if inpython:
                script.append('%s %d 0\n' % (salt, line))
            else:
                script.append('echo %s %s $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append('set -x\n')
            if os.getenv('MSYSTEM'):
                script.append('alias pwd="pwd -W"\n')

        for n, l in enumerate(lines):
            if not l.endswith('\n'):
                l += '\n'
            if l.startswith('#require'):
                # Whole-test requirement: replace the script with a bare
                # skip (exit 80) when any feature is missing.
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != '#require':
                    after.setdefault(pos, []).append(' !!! invalid #require\n')
                if not self._hghave(lsplit[1:]):
                    script = ["exit 80\n"]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith('#if'):
                # Conditional section: evaluate the feature list now and
                # skip/keep the enclosed lines accordingly.
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != '#if':
                    after.setdefault(pos, []).append(' !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(' !!! nested #if\n')
                skipping = not self._hghave(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith('#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith('#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(' !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(' >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append('%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(' ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(' $ '): # commands
                if inpython:
                    script.append('EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == 'cd':
                    # Make a failed cd abort the script instead of running
                    # the remaining commands in the wrong directory.
                    l = ' $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(' > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(' '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append('EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append('EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(' !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected
926 926
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge raw shell-script output back into a unified test body.

        ``output`` is the list of lines produced by the generated script;
        ``salt`` markers embedded in it identify which source line each
        chunk belongs to. ``after`` holds the non-active source lines and
        ``expected`` the expected output, both keyed by source position.
        Returns ``(exitcode, postout)`` where ``postout`` is the merged
        line list; ``exitcode`` is rewritten to False when the run only
        produced warn-level mismatches.
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            # Split a salted line into actual output (lout) and the
            # marker payload (lcmd: "<lineno> <returncode>").
            lout, lcmd = l, None
            if salt in l:
                lout, lcmd = l.split(salt, 1)

            if lout:
                if not lout.endswith('\n'):
                    lout += ' (no-eol)\n'

                # Find the expected output at the current position.
                el = None
                if expected.get(pos, None):
                    el = expected[pos].pop(0)

                # linematch returns True/False, or a string directive
                # ('+glob'/'-glob') asking us to rewrite the line.
                r = TTest.linematch(el, lout)
                if isinstance(r, str):
                    if r == '+glob':
                        lout = el[:-1] + ' (glob)\n'
                        r = '' # Warn only this line.
                    elif r == '-glob':
                        lout = ''.join(el.rsplit(' (glob)', 1))
                        r = '' # Warn only this line.
                    else:
                        log('\ninfo, unknown linematch result: %r\n' % r)
                        r = False
                if r:
                    postout.append(' ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape('%s (esc)\n' %
                                                   lout.rstrip('\n'))
                    postout.append(' ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(' [%s]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                # Advance to the source line named by the salt marker.
                pos = int(lcmd.split()[0])

        # Flush any trailing non-active source lines.
        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
989 989
990 990 @staticmethod
991 991 def rematch(el, l):
992 992 try:
993 993 # use \Z to ensure that the regex matches to the end of the string
994 994 if os.name == 'nt':
995 995 return re.match(el + r'\r?\n\Z', l)
996 996 return re.match(el + r'\n\Z', l)
997 997 except re.error:
998 998 # el is an invalid regex
999 999 return False
1000 1000
1001 1001 @staticmethod
1002 1002 def globmatch(el, l):
1003 1003 # The only supported special characters are * and ? plus / which also
1004 1004 # matches \ on windows. Escaping of these characters is supported.
1005 1005 if el + '\n' == l:
1006 1006 if os.altsep:
1007 1007 # matching on "/" is not needed for this line
1008 1008 for pat in checkcodeglobpats:
1009 1009 if pat.match(el):
1010 1010 return True
1011 1011 return '-glob'
1012 1012 return True
1013 1013 i, n = 0, len(el)
1014 1014 res = ''
1015 1015 while i < n:
1016 1016 c = el[i]
1017 1017 i += 1
1018 1018 if c == '\\' and el[i] in '*?\\/':
1019 1019 res += el[i - 1:i + 1]
1020 1020 i += 1
1021 1021 elif c == '*':
1022 1022 res += '.*'
1023 1023 elif c == '?':
1024 1024 res += '.'
1025 1025 elif c == '/' and os.altsep:
1026 1026 res += '[/\\\\]'
1027 1027 else:
1028 1028 res += re.escape(c)
1029 1029 return TTest.rematch(res, l)
1030 1030
1031 1031 @staticmethod
1032 1032 def linematch(el, l):
1033 1033 if el == l: # perfect match (fast)
1034 1034 return True
1035 1035 if el:
1036 1036 if el.endswith(" (esc)\n"):
1037 1037 el = el[:-7].decode('string-escape') + '\n'
1038 1038 if el == l or os.name == 'nt' and el[:-1] + '\r\n' == l:
1039 1039 return True
1040 1040 if el.endswith(" (re)\n"):
1041 1041 return TTest.rematch(el[:-6], l)
1042 1042 if el.endswith(" (glob)\n"):
1043 1043 # ignore '(glob)' added to l by 'replacements'
1044 1044 if l.endswith(" (glob)\n"):
1045 1045 l = l[:-8] + "\n"
1046 1046 return TTest.globmatch(el[:-8], l)
1047 1047 if os.altsep and l.replace('\\', '/') == el:
1048 1048 return '+glob'
1049 1049 return False
1050 1050
1051 1051 @staticmethod
1052 1052 def parsehghaveoutput(lines):
1053 1053 '''Parse hghave log lines.
1054 1054
1055 1055 Return tuple of lists (missing, failed):
1056 1056 * the missing/unknown features
1057 1057 * the features for which existence check failed'''
1058 1058 missing = []
1059 1059 failed = []
1060 1060 for line in lines:
1061 1061 if line.startswith(TTest.SKIPPED_PREFIX):
1062 1062 line = line.splitlines()[0]
1063 1063 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1064 1064 elif line.startswith(TTest.FAILED_PREFIX):
1065 1065 line = line.splitlines()[0]
1066 1066 failed.append(line[len(TTest.FAILED_PREFIX):])
1067 1067
1068 1068 return missing, failed
1069 1069
    @staticmethod
    def _escapef(m):
        # re.sub callback: map the matched character through the class's
        # escape table.
        return TTest.ESCAPEMAP[m.group(0)]
1073 1073
    @staticmethod
    def _stringescape(s):
        # Substitute every escapable character in s via _escapef.
        return TTest.ESCAPESUB(TTest._escapef, s)
1077 1077
1078 1078
# os.WIFEXITED is POSIX-only; fall back to "never" on platforms without it.
wifexited = getattr(os, "WIFEXITED", lambda x: False)
def run(cmd, wd, replacements, env, debug=False, timeout=None):
    """Run command in a sub-process, capturing the output (stdout and stderr).
    Return a tuple (exitcode, output). output is None in debug mode."""
    if debug:
        # Debug mode: inherit our stdio so the test output is visible live.
        proc = subprocess.Popen(cmd, shell=True, cwd=wd, env=env)
        ret = proc.wait()
        return (ret, None)

    proc = Popen4(cmd, wd, timeout, env)
    def cleanup():
        # Kill the child and any daemons it spawned; report SIGTERM as the
        # exit status when the child otherwise exited cleanly.
        terminate(proc)
        ret = proc.wait()
        if ret == 0:
            ret = signal.SIGTERM << 8
        killdaemons(env['DAEMON_PIDS'])
        return ret

    output = ''
    proc.tochild.close()

    try:
        output = proc.fromchild.read()
    except KeyboardInterrupt:
        vlog('# Handling keyboard interrupt')
        cleanup()
        raise

    ret = proc.wait()
    if wifexited(ret):
        ret = os.WEXITSTATUS(ret)

    # Popen4 sets proc.timeout when the time limit expired — presumably via
    # its timeout handling; the sentinel exitcode 'timeout' flags this.
    if proc.timeout:
        ret = 'timeout'

    if ret:
        # Any failure may leave background servers running; reap them.
        killdaemons(env['DAEMON_PIDS'])

    # Apply the caller-supplied (pattern, replacement) normalizations.
    for s, r in replacements:
        output = re.sub(s, r, output)
    return ret, output.splitlines(True)
1120 1120
# Serializes console/report output across concurrent test threads.
iolock = threading.RLock()
1122 1122
class SkipTest(Exception):
    """Signals that the current test must be skipped."""
1125 1125
class IgnoreTest(Exception):
    """Signals that the current test must be ignored."""
1128 1128
class WarnTest(Exception):
    """Signals that the current test produced a warning."""
1131 1131
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest.

    All writes to ``self.stream`` are serialized through the module-level
    ``iolock`` because tests report from multiple threads.
    """
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        # (testname, cuser, csys, real) tuples appended by stopTest().
        self.times = []
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run on the first failure.
            self.stop()
        else:
            iolock.acquire()
            if not self._options.nodiff:
                self.stream.write('\nERROR: %s output changed\n' % test)

            self.stream.write('!')
            self.stream.flush()
            iolock.release()

    def addSuccess(self, test):
        iolock.acquire()
        super(TestResult, self).addSuccess(test)
        iolock.release()
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        iolock.acquire()
        if self.showAll:
            self.stream.writeln('skipped %s' % reason)
        else:
            self.stream.write('s')
            self.stream.flush()
        iolock.release()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        iolock.acquire()
        if self.showAll:
            self.stream.writeln('ignored %s' % reason)
        else:
            if reason != 'not retesting' and reason != "doesn't match keyword":
                self.stream.write('i')
            else:
                # Filtered-out tests still count toward the run total.
                self.testsRun += 1
            self.stream.flush()
        iolock.release()

    def addWarn(self, test, reason):
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        iolock.acquire()
        if self.showAll:
            self.stream.writeln('warned %s' % reason)
        else:
            self.stream.write('~')
            self.stream.flush()
        iolock.release()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        # NOTE(review): `failed` is never assigned True in this method, so
        # the `not failed` guard below is currently always satisfied.
        failed = False
        lines = []

        iolock.acquire()
        if self._options.nodiff:
            pass
        elif self._options.view:
            # Hand both files to the user-configured external diff viewer.
            os.system("%s %s %s" %
                      (self._options.view, test.refpath, test.errpath))
        else:
            servefail, lines = getdiff(expected, got,
                                       test.refpath, test.errpath)
            if servefail:
                self.addFailure(
                    test,
                    'server failed to start (HGPORT=%s)' % test._startport)
            else:
                self.stream.write('\n')
                for line in lines:
                    self.stream.write(line)
                self.stream.flush()

        # handle interactive prompt without releasing iolock
        if self._options.interactive:
            self.stream.write('Accept this change? [n] ')
            answer = sys.stdin.readline().strip()
            if answer.lower() in ('y', 'yes'):
                # Promote the .err output to be the new expected output.
                if test.name.endswith('.t'):
                    rename(test.errpath, test.path)
                else:
                    rename(test.errpath, '%s.out' % test.path)
                accepted = True
        if not accepted and not failed:
            self.faildata[test.name] = ''.join(lines)
        iolock.release()

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        # os.times() indices 2/3 are children's user/system CPU time and
        # index 4 is elapsed real time, so these deltas cover the spawned
        # test process.
        self.times.append((test.name, endtime[2] - starttime[2],
                           endtime[3] - starttime[3],
                           endtime[4] - starttime[4]))

        if interrupted:
            iolock.acquire()
            self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                test.name, self.times[-1][3]))
            iolock.release()
1293 1293 iolock.release()
1294 1294
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # Return the test, reloading a fresh instance when the test
                # asks for it (each run gets a distinct count).
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test.name, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.name in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.name.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        def job(test, result):
            # Thread body: run one test and signal completion on the queue.
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise

        stoppedearly = False

        try:
            while tests or running:
                # Reap a finished job when the pool is full or we're idle.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: requeue the test so the suite never drains.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test.name, num_tests[0]))
                        else:
                            tests.append(test)
                    t = threading.Thread(target=job, name=test.name,
                                         args=(test, result))
                    t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        return result
1421 1436
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; consulted for options and paths.
        self._runner = runner

    def run(self, test):
        """Execute ``test`` and emit the console/xunit/json reports."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # Hold iolock for the whole report so output is not interleaved.
        iolock.acquire()
        self.stream.writeln('')

        if not self._runner.options.noskips:
            for test, msg in result.skipped:
                self.stream.writeln('Skipped %s: %s' % (test.name, msg))
        for test, msg in result.warned:
            self.stream.writeln('Warned %s: %s' % (test.name, msg))
        for test, msg in result.failures:
            self.stream.writeln('Failed %s: %s' % (test.name, msg))
        for test, msg in result.errors:
            self.stream.writeln('Errored %s: %s' % (test.name, msg))

        if self._runner.options.xunit:
            # Write an xunit-style XML report of successes and failures.
            xuf = open(self._runner.options.xunit, 'wb')
            try:
                timesd = dict(
                    (test, real) for test, cuser, csys, real in result.times)
                doc = minidom.Document()
                s = doc.createElement('testsuite')
                s.setAttribute('name', 'run-tests')
                s.setAttribute('tests', str(result.testsRun))
                s.setAttribute('errors', "0") # TODO
                s.setAttribute('failures', str(failed))
                s.setAttribute('skipped', str(skipped + ignored))
                doc.appendChild(s)
                for tc in result.successes:
                    t = doc.createElement('testcase')
                    t.setAttribute('name', tc.name)
                    t.setAttribute('time', '%.3f' % timesd[tc.name])
                    s.appendChild(t)
                for tc, err in sorted(result.faildata.iteritems()):
                    t = doc.createElement('testcase')
                    t.setAttribute('name', tc)
                    t.setAttribute('time', '%.3f' % timesd[tc])
                    # createCDATASection expects a unicode or it will convert
                    # using default conversion rules, which will fail if
                    # string isn't ASCII.
                    err = cdatasafe(err).decode('utf-8', 'replace')
                    cd = doc.createCDATASection(err)
                    t.appendChild(cd)
                    s.appendChild(t)
                xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
            finally:
                xuf.close()

        if self._runner.options.json:
            if json is None:
                raise ImportError("json module not installed")
            # report.json maps each test name to its outcome and timings.
            jsonpath = os.path.join(self._runner._testdir, 'report.json')
            fp = open(jsonpath, 'w')
            try:
                timesd = {}
                for test, cuser, csys, real in result.times:
                    timesd[test] = (real, cuser, csys)

                outcome = {}
                for tc in result.successes:
                    testresult = {'result': 'success',
                                  'time': ('%0.3f' % timesd[tc.name][0]),
                                  'cuser': ('%0.3f' % timesd[tc.name][1]),
                                  'csys': ('%0.3f' % timesd[tc.name][2])}
                    outcome[tc.name] = testresult

                for tc, err in sorted(result.faildata.iteritems()):
                    testresult = {'result': 'failure',
                                  'time': ('%0.3f' % timesd[tc][0]),
                                  'cuser': ('%0.3f' % timesd[tc][1]),
                                  'csys': ('%0.3f' % timesd[tc][2])}
                    outcome[tc] = testresult

                for tc, reason in result.skipped:
                    testresult = {'result': 'skip',
                                  'time': ('%0.3f' % timesd[tc.name][0]),
                                  'cuser': ('%0.3f' % timesd[tc.name][1]),
                                  'csys': ('%0.3f' % timesd[tc.name][2])}
                    outcome[tc.name] = testresult

                jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                fp.writelines(("testreport =", jsonout))
            finally:
                fp.close()

        self._runner._checkhglib('Tested')

        self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
                            % (result.testsRun,
                               skipped + ignored, warned, failed))
        if failed:
            # Failures may depend on hash ordering; print the seed so the
            # run can be reproduced.
            self.stream.writeln('python hash seed: %s' %
                                os.environ['PYTHONHASHSEED'])
        if self._runner.options.time:
            self.printtimes(result.times)

        iolock.release()

        return result

    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        # Sort by real elapsed time (4th tuple element).
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %s'
        self.stream.writeln('%-7s %-7s %-7s %s' % ('cuser', 'csys', 'real',
                                                   'Test'))
        for test, cuser, csys, real in times:
            self.stream.writeln(cols % (cuser, csys, real, test))
1548 1563
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        # The running interpreter itself must be reachable by name.
        os.path.basename(sys.executable),
        'diff',
        'grep',
        'unzip',
        'gunzip',
        'bunzip2',
        'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        ('.py', PythonTest),
        ('.t', TTest),
    ]
1571 1586
    def __init__(self):
        # Parsed command-line options; set in run().
        self.options = None
        # Root of the Mercurial source checkout, when detected.
        self._hgroot = None
        # Directory tests execute from (TESTDIR).
        self._testdir = None
        # Scratch directory for this run (HGTMP).
        self._hgtmp = None
        # Temp-install destination, or None with --with-hg.
        self._installdir = None
        # Directory holding the hg executable under test.
        self._bindir = None
        self._tmpbinddir = None
        # Directory added to the import path for 'mercurial'.
        self._pythondir = None
        # Path of the coverage data file (.coverage).
        self._coveragefile = None
        # Files created by this run, removed in _cleanup().
        self._createdfiles = []
        self._hgpath = None
1584 1599
    def run(self, args, parser=None):
        """Run the test suite."""
        # Force a predictable umask for the duration of the run and
        # restore the caller's value afterwards.
        oldmask = os.umask(022)
        try:
            parser = parser or getparser()
            options, args = parseargs(args, parser)
            self.options = options

            self._checktools()
            tests = self.findtests(args)
            return self._run(tests)
        finally:
            os.umask(oldmask)
1598 1613
    def _run(self, tests):
        """Set up the environment (tmp dir, PATH, PYTHONPATH) and run tests."""
        if self.options.random:
            random.shuffle(tests)
        else:
            # keywords for slow tests
            slow = 'svn gendoc check-code-hg'.split()
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                try:
                    val = -os.stat(f).st_size
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    return -1e9 # file does not exist, tell early
                for kw in slow:
                    if kw in f:
                        val *= 10
                return val
            tests.sort(key=sortkey)

        self._testdir = os.environ['TESTDIR'] = os.getcwd()

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = self.options.tmpdir
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print "error: temp dir %r already exists" % tmpdir
                return 1

                # Automatically removing tmpdir sounds convenient, but could
                # really annoy anyone in the habit of using "--tmpdir=/tmp"
                # or "--tmpdir=$HOME".
                #vlog("# Removing temp dir", tmpdir)
                #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = os.getenv('TMP')
            tmpdir = tempfile.mkdtemp('', 'hgtests.', d)
        self._hgtmp = os.environ['HGTMP'] = os.path.realpath(tmpdir)

        if self.options.with_hg:
            # Use the caller-provided hg instead of performing a temp install.
            self._installdir = None
            self._bindir = os.path.dirname(os.path.realpath(
                self.options.with_hg))
            self._tmpbindir = os.path.join(self._hgtmp, 'install', 'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, "install")
            self._bindir = os.environ["BINDIR"] = \
                os.path.join(self._installdir, "bin")
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, "lib", "python")

        os.environ["BINDIR"] = self._bindir
        os.environ["PYTHON"] = PYTHON

        runtestdir = os.path.abspath(os.path.dirname(__file__))
        path = [self._bindir, runtestdir] + os.environ["PATH"].split(os.pathsep)
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        os.environ["PATH"] = os.pathsep.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = os.environ.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        os.environ[IMPL_PATH] = os.pathsep.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"

        self._coveragefile = os.path.join(self._testdir, '.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])

        try:
            return self._runtests(tests) or 0
        finally:
            # Brief pause before cleanup — presumably to let child
            # processes release files; TODO confirm.
            time.sleep(.1)
            self._cleanup()
1709 1724
1710 1725 def findtests(self, args):
1711 1726 """Finds possible test files from arguments.
1712 1727
1713 1728 If you wish to inject custom tests into the test harness, this would
1714 1729 be a good function to monkeypatch or override in a derived class.
1715 1730 """
1716 1731 if not args:
1717 1732 if self.options.changed:
1718 1733 proc = Popen4('hg st --rev "%s" -man0 .' %
1719 1734 self.options.changed, None, 0)
1720 1735 stdout, stderr = proc.communicate()
1721 1736 args = stdout.strip('\0').split('\0')
1722 1737 else:
1723 1738 args = os.listdir('.')
1724 1739
1725 1740 return [t for t in args
1726 1741 if os.path.basename(t).startswith('test-')
1727 1742 and (t.endswith('.py') or t.endswith('.t'))]
1728 1743
    def _runtests(self, tests):
        """Install hg if needed, build the suite, run it and map the
        outcome to an exit code (1 = failed, 80 = warned, None = clean)."""
        try:
            if self._installdir:
                self._installhg()
                self._checkhglib("Testing")
            else:
                self._usecorrectpython()

            if self.options.restart:
                # --restart: skip ahead to the first test with a stale .err.
                orig = list(tests)
                while tests:
                    if os.path.exists(tests[0] + ".err"):
                        break
                    tests.pop(0)
                if not tests:
                    print "running all tests"
                    tests = orig

            tests = [self._gettest(t, i) for i, t in enumerate(tests)]

            failed = False
            warned = False

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=self.options.keywords,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              tests=tests, loadtest=self._gettest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)
            result = runner.run(suite)

            if result.failures:
                failed = True
            if result.warned:
                warned = True

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print "\ninterrupted!"

        if failed:
            return 1
        if warned:
            return 80
1782 1797
1783 1798 def _gettest(self, test, count):
1784 1799 """Obtain a Test by looking at its filename.
1785 1800
1786 1801 Returns a Test instance. The Test may not be runnable if it doesn't
1787 1802 map to a known type.
1788 1803 """
1789 1804 lctest = test.lower()
1790 1805 testcls = Test
1791 1806
1792 1807 for ext, cls in self.TESTTYPES:
1793 1808 if lctest.endswith(ext):
1794 1809 testcls = cls
1795 1810 break
1796 1811
1797 1812 refpath = os.path.join(self._testdir, test)
1798 1813 tmpdir = os.path.join(self._hgtmp, 'child%d' % count)
1799 1814
1800 1815 t = testcls(refpath, tmpdir,
1801 1816 keeptmpdir=self.options.keep_tmpdir,
1802 1817 debug=self.options.debug,
1803 1818 timeout=self.options.timeout,
1804 1819 startport=self.options.port + count * 3,
1805 1820 extraconfigopts=self.options.extra_config_opt,
1806 1821 py3kwarnings=self.options.py3k_warnings,
1807 1822 shell=self.options.shell)
1808 1823 t.should_reload = True
1809 1824 return t
1810 1825
1811 1826 def _cleanup(self):
1812 1827 """Clean up state from this test invocation."""
1813 1828
1814 1829 if self.options.keep_tmpdir:
1815 1830 return
1816 1831
1817 1832 vlog("# Cleaning up HGTMP", self._hgtmp)
1818 1833 shutil.rmtree(self._hgtmp, True)
1819 1834 for f in self._createdfiles:
1820 1835 try:
1821 1836 os.remove(f)
1822 1837 except OSError:
1823 1838 pass
1824 1839
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and 'python.exe' or 'python'
        if getattr(os, 'symlink', None):
            # Preferred approach: place a symlink named python/python.exe in
            # the per-run bin directory, pointing at the running interpreter.
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # Reuse an existing correct symlink; replace a stale one.
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError, err:
                # ENOENT means no symlink yet, which is fine; anything
                # else is a real error.
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    # Record for removal in _cleanup().
                    self._createdfiles.append(mypython)
                except OSError, err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            # No symlink support (e.g. Windows without privileges): make the
            # interpreter's own directory the first PATH entry instead.
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print "WARNING: Cannot find %s in search path" % pyexename
1858 1873
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join("tests", "install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = "--pure"
        else:
            pure = ""
        py3 = ''
        if sys.version_info[0] == 3:
            # setup.py --c2to3 converts the source when installing under py3.
            py3 = '--c2to3'

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = '--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = ''
        # Single shell command: clean, build and install into the per-run
        # prefix, with all output redirected to the install log.
        cmd = ('%(exe)s setup.py %(py3)s %(pure)s clean --all'
               ' build %(compiler)s --build-base="%(base)s"'
               ' install --force --prefix="%(prefix)s"'
               ' --install-lib="%(libdir)s"'
               ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {'exe': sys.executable, 'py3': py3, 'pure': pure,
                  'compiler': compiler,
                  'base': os.path.join(self._hgtmp, "build"),
                  'prefix': self._installdir, 'libdir': self._pythondir,
                  'bindir': self._bindir,
                  'nohome': nohome, 'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            if not self.options.verbose:
                os.remove(installerrs)
        else:
            # Install failed: dump the captured log and abort the run.
            f = open(installerrs, 'rb')
            for line in f:
                sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            # Append -3 to the interpreter options on the installed hg
            # script's first (shebang) line.
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, 'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if '"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace('"%~dp0..\python" "%~dp0hg" %*',
                                    '"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print 'WARNING: cannot fix hg.bat reference to python.exe'

        if self.options.anycoverage:
            # Arrange for every spawned python to start coverage tracking
            # via sitecustomize and the COVERAGE_* environment variables.
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
1968 1983
1969 1984 def _checkhglib(self, verb):
1970 1985 """Ensure that the 'mercurial' package imported by python is
1971 1986 the one we expect it to be. If not, print a warning to stderr."""
1972 1987 if ((self._bindir == self._pythondir) and
1973 1988 (self._bindir != self._tmpbindir)):
1974 1989 # The pythondir has been inferred from --with-hg flag.
1975 1990 # We cannot expect anything sensible here.
1976 1991 return
1977 1992 expecthg = os.path.join(self._pythondir, 'mercurial')
1978 1993 actualhg = self._gethgpath()
1979 1994 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
1980 1995 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
1981 1996 ' (expected %s)\n'
1982 1997 % (verb, actualhg, expecthg))
1983 1998 def _gethgpath(self):
1984 1999 """Return the path to the mercurial package that is actually found by
1985 2000 the current Python interpreter."""
1986 2001 if self._hgpath is not None:
1987 2002 return self._hgpath
1988 2003
1989 2004 cmd = '%s -c "import mercurial; print (mercurial.__path__[0])"'
1990 2005 pipe = os.popen(cmd % PYTHON)
1991 2006 try:
1992 2007 self._hgpath = pipe.read().strip()
1993 2008 finally:
1994 2009 pipe.close()
1995 2010
1996 2011 return self._hgpath
1997 2012
1998 2013 def _outputcoverage(self):
1999 2014 """Produce code coverage output."""
2000 2015 from coverage import coverage
2001 2016
2002 2017 vlog('# Producing coverage report')
2003 2018 # chdir is the easiest way to get short, relative paths in the
2004 2019 # output.
2005 2020 os.chdir(self._hgroot)
2006 2021 covdir = os.path.join(self._installdir, '..', 'coverage')
2007 2022 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2008 2023
2009 2024 # Map install directory paths back to source directory.
2010 2025 cov.config.paths['srcdir'] = ['.', self._pythondir]
2011 2026
2012 2027 cov.combine()
2013 2028
2014 2029 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2015 2030 cov.report(ignore_errors=True, omit=omit)
2016 2031
2017 2032 if self.options.htmlcov:
2018 2033 htmldir = os.path.join(self._testdir, 'htmlcov')
2019 2034 cov.html_report(directory=htmldir, omit=omit)
2020 2035 if self.options.annotate:
2021 2036 adir = os.path.join(self._testdir, 'annotated')
2022 2037 if not os.path.isdir(adir):
2023 2038 os.mkdir(adir)
2024 2039 cov.annotate(directory=adir, omit=omit)
2025 2040
2026 2041 def _findprogram(self, program):
2027 2042 """Search PATH for a executable program"""
2028 2043 for p in os.environ.get('PATH', os.defpath).split(os.pathsep):
2029 2044 name = os.path.join(p, program)
2030 2045 if os.name == 'nt' or os.access(name, os.X_OK):
2031 2046 return name
2032 2047 return None
2033 2048
    def _checktools(self):
        """Ensure tools required to run tests are present."""
        for p in self.REQUIREDTOOLS:
            # On Windows executables need an explicit .exe suffix.
            if os.name == 'nt' and not p.endswith('.exe'):
                p += '.exe'
            found = self._findprogram(p)
            if found:
                vlog("# Found prerequisite", p, "at", found)
            else:
                # Missing tools are only warned about; some tests may still
                # be runnable without them.
                print "WARNING: Did not find prerequisite tool: %s " % p
2044 2059
if __name__ == '__main__':
    runner = TestRunner()

    # On Windows, put the standard streams into binary mode so that test
    # output is not mangled by \n <-> \r\n translation. msvcrt exists only
    # on Windows; elsewhere the ImportError is expected and harmless.
    try:
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,492 +1,493 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Smoke test
4 4 ============
5 5
6 6 $ $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE
7 7
8 8 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
9 9
10 10 a successful test
11 11 =======================
12 12
13 13 $ cat > test-success.t << EOF
14 14 > $ echo babar
15 15 > babar
16 16 > $ echo xyzzy
17 17 > xyzzy
18 18 > EOF
19 19
20 20 $ $TESTDIR/run-tests.py --with-hg=`which hg`
21 21 .
22 22 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
23 23
24 24 failing test
25 25 ==================
26 26
27 27 $ cat > test-failure.t << EOF
28 28 > $ echo babar
29 29 > rataxes
30 30 > This is a noop statement so that
31 31 > this test is still more bytes than success.
32 32 > EOF
33 33
34 34 >>> fh = open('test-failure-unicode.t', 'wb')
35 35 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8'))
36 36 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8'))
37 37
38 38 $ $TESTDIR/run-tests.py --with-hg=`which hg`
39 39
40 40 --- $TESTTMP/test-failure.t
41 41 +++ $TESTTMP/test-failure.t.err
42 42 @@ -1,4 +1,4 @@
43 43 $ echo babar
44 44 - rataxes
45 45 + babar
46 46 This is a noop statement so that
47 47 this test is still more bytes than success.
48 48
49 49 ERROR: test-failure.t output changed
50 50 !.
51 51 --- $TESTTMP/test-failure-unicode.t
52 52 +++ $TESTTMP/test-failure-unicode.t.err
53 53 @@ -1,2 +1,2 @@
54 54 $ echo babar\xce\xb1 (esc)
55 55 - l\xce\xb5\xce\xb5t (esc)
56 56 + babar\xce\xb1 (esc)
57 57
58 58 ERROR: test-failure-unicode.t output changed
59 59 !
60 60 Failed test-failure.t: output changed
61 61 Failed test-failure-unicode.t: output changed
62 62 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
63 63 python hash seed: * (glob)
64 64 [1]
65 65
66 66 test --xunit support
67 67 $ $TESTDIR/run-tests.py --with-hg=`which hg` --xunit=xunit.xml
68 68
69 69 --- $TESTTMP/test-failure.t
70 70 +++ $TESTTMP/test-failure.t.err
71 71 @@ -1,4 +1,4 @@
72 72 $ echo babar
73 73 - rataxes
74 74 + babar
75 75 This is a noop statement so that
76 76 this test is still more bytes than success.
77 77
78 78 ERROR: test-failure.t output changed
79 79 !.
80 80 --- $TESTTMP/test-failure-unicode.t
81 81 +++ $TESTTMP/test-failure-unicode.t.err
82 82 @@ -1,2 +1,2 @@
83 83 $ echo babar\xce\xb1 (esc)
84 84 - l\xce\xb5\xce\xb5t (esc)
85 85 + babar\xce\xb1 (esc)
86 86
87 87 ERROR: test-failure-unicode.t output changed
88 88 !
89 89 Failed test-failure.t: output changed
90 90 Failed test-failure-unicode.t: output changed
91 91 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
92 92 python hash seed: * (glob)
93 93 [1]
94 94 $ cat xunit.xml
95 95 <?xml version="1.0" encoding="utf-8"?>
96 96 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
97 97 <testcase name="test-success.t" time="*"/> (glob)
98 98 <testcase name="test-failure-unicode.t" time="*"> (glob)
99 99 <![CDATA[--- $TESTTMP/test-failure-unicode.t
100 100 +++ $TESTTMP/test-failure-unicode.t.err
101 101 @@ -1,2 +1,2 @@
102 102 $ echo babar\xce\xb1 (esc)
103 103 - l\xce\xb5\xce\xb5t (esc)
104 104 + babar\xce\xb1 (esc)
105 105 ]]> </testcase>
106 106 <testcase name="test-failure.t" time="*"> (glob)
107 107 <![CDATA[--- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,4 +1,4 @@
110 110 $ echo babar
111 111 - rataxes
112 112 + babar
113 113 This is a noop statement so that
114 114 this test is still more bytes than success.
115 115 ]]> </testcase>
116 116 </testsuite>
117 117
118 118 $ rm test-failure-unicode.t
119 119
120 120 test for --retest
121 121 ====================
122 122
123 123 $ $TESTDIR/run-tests.py --with-hg=`which hg` --retest
124 124
125 125 --- $TESTTMP/test-failure.t
126 126 +++ $TESTTMP/test-failure.t.err
127 127 @@ -1,4 +1,4 @@
128 128 $ echo babar
129 129 - rataxes
130 130 + babar
131 131 This is a noop statement so that
132 132 this test is still more bytes than success.
133 133
134 134 ERROR: test-failure.t output changed
135 135 !
136 136 Failed test-failure.t: output changed
137 137 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
138 138 python hash seed: * (glob)
139 139 [1]
140 140
141 141 Selecting Tests To Run
142 142 ======================
143 143
144 144 successful
145 145
146 146 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t
147 147 .
148 148 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
149 149
150 150 success w/ keyword
151 151 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k xyzzy
152 152 .
153 153 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
154 154
155 155 failed
156 156
157 157 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-failure.t
158 158
159 159 --- $TESTTMP/test-failure.t
160 160 +++ $TESTTMP/test-failure.t.err
161 161 @@ -1,4 +1,4 @@
162 162 $ echo babar
163 163 - rataxes
164 164 + babar
165 165 This is a noop statement so that
166 166 this test is still more bytes than success.
167 167
168 168 ERROR: test-failure.t output changed
169 169 !
170 170 Failed test-failure.t: output changed
171 171 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
172 172 python hash seed: * (glob)
173 173 [1]
174 174
175 175 failure w/ keyword
176 176 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k rataxes
177 177
178 178 --- $TESTTMP/test-failure.t
179 179 +++ $TESTTMP/test-failure.t.err
180 180 @@ -1,4 +1,4 @@
181 181 $ echo babar
182 182 - rataxes
183 183 + babar
184 184 This is a noop statement so that
185 185 this test is still more bytes than success.
186 186
187 187 ERROR: test-failure.t output changed
188 188 !
189 189 Failed test-failure.t: output changed
190 190 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
191 191 python hash seed: * (glob)
192 192 [1]
193 193
194 194 Verify that when a process fails to start we show a useful message
195 195 ==================================================================
196 196 NOTE: there is currently a bug where this shows "2 failed" even though
197 197 it's actually the same test being reported for failure twice.
198 198
199 199 $ cat > test-serve-fail.t <<EOF
200 200 > $ echo 'abort: child process failed to start blah'
201 201 > EOF
202 202 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-serve-fail.t
203 203
204 204 ERROR: test-serve-fail.t output changed
205 205 !
206 206 ERROR: test-serve-fail.t output changed
207 207 !
208 208 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
209 209 Failed test-serve-fail.t: output changed
210 210 # Ran 1 tests, 0 skipped, 0 warned, 2 failed.
211 211 python hash seed: * (glob)
212 212 [1]
213 213 $ rm test-serve-fail.t
214 214
215 215 Running In Debug Mode
216 216 ======================
217 217
218 218 $ $TESTDIR/run-tests.py --with-hg=`which hg` --debug 2>&1 | grep -v pwd
219 219 + echo *SALT* 0 0 (glob)
220 220 *SALT* 0 0 (glob)
221 221 + echo babar
222 222 babar
223 223 + echo *SALT* 4 0 (glob)
224 224 *SALT* 4 0 (glob)
225 225 .+ echo *SALT* 0 0 (glob)
226 226 *SALT* 0 0 (glob)
227 227 + echo babar
228 228 babar
229 229 + echo *SALT* 2 0 (glob)
230 230 *SALT* 2 0 (glob)
231 231 + echo xyzzy
232 232 xyzzy
233 233 + echo *SALT* 4 0 (glob)
234 234 *SALT* 4 0 (glob)
235 235 .
236 236 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
237 237
238 238 Parallel runs
239 239 ==============
240 240
241 241 (duplicate the failing test to get predictable output)
242 242 $ cp test-failure.t test-failure-copy.t
243 243
244 244 $ $TESTDIR/run-tests.py --with-hg=`which hg` --jobs 2 test-failure*.t -n
245 245 !!
246 246 Failed test-failure*.t: output changed (glob)
247 247 Failed test-failure*.t: output changed (glob)
248 248 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
249 249 python hash seed: * (glob)
250 250 [1]
251 251
252 252 failures in parallel with --first should only print one failure
253 253 >>> f = open('test-nothing.t', 'w')
254 254 >>> f.write('foo\n' * 1024)
255 255 >>> f.write(' $ sleep 1')
256 256 $ $TESTDIR/run-tests.py --with-hg=`which hg` --jobs 2 --first
257 257
258 258 --- $TESTTMP/test-failure*.t (glob)
259 259 +++ $TESTTMP/test-failure*.t.err (glob)
260 260 @@ -1,4 +1,4 @@
261 261 $ echo babar
262 262 - rataxes
263 263 + babar
264 264 This is a noop statement so that
265 265 this test is still more bytes than success.
266 266
267 267 Failed test-failure*.t: output changed (glob)
268 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
268 Failed test-nothing.t: output changed
269 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
269 270 python hash seed: * (glob)
270 271 [1]
271 272
272 273
273 274 (delete the duplicated test file)
274 275 $ rm test-failure-copy.t test-nothing.t
275 276
276 277
277 278 Interactive run
278 279 ===============
279 280
280 281 (backup the failing test)
281 282 $ cp test-failure.t backup
282 283
283 284 Refuse the fix
284 285
285 286 $ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i
286 287
287 288 --- $TESTTMP/test-failure.t
288 289 +++ $TESTTMP/test-failure.t.err
289 290 @@ -1,4 +1,4 @@
290 291 $ echo babar
291 292 - rataxes
292 293 + babar
293 294 This is a noop statement so that
294 295 this test is still more bytes than success.
295 296 Accept this change? [n]
296 297 ERROR: test-failure.t output changed
297 298 !.
298 299 Failed test-failure.t: output changed
299 300 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
300 301 python hash seed: * (glob)
301 302 [1]
302 303
303 304 $ cat test-failure.t
304 305 $ echo babar
305 306 rataxes
306 307 This is a noop statement so that
307 308 this test is still more bytes than success.
308 309
309 310 Interactive with custom view
310 311
311 312 $ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i --view echo
312 313 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
313 314 Accept this change? [n]* (glob)
314 315 ERROR: test-failure.t output changed
315 316 !.
316 317 Failed test-failure.t: output changed
317 318 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
318 319 python hash seed: * (glob)
319 320 [1]
320 321
321 322 View the fix
322 323
323 324 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` --view echo
324 325 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
325 326
326 327 ERROR: test-failure.t output changed
327 328 !.
328 329 Failed test-failure.t: output changed
329 330 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
330 331 python hash seed: * (glob)
331 332 [1]
332 333
333 334 Accept the fix
334 335
335 336 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
336 337 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
337 338 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
338 339 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
339 340 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
340 341 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
341 342 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` -i 2>&1 | \
342 343 > sed -e 's,(glob)$,&<,g'
343 344
344 345 --- $TESTTMP/test-failure.t
345 346 +++ $TESTTMP/test-failure.t.err
346 347 @@ -1,9 +1,9 @@
347 348 $ echo babar
348 349 - rataxes
349 350 + babar
350 351 This is a noop statement so that
351 352 this test is still more bytes than success.
352 353 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
353 354 - saved backup bundle to $TESTTMP/foo.hg
354 355 + saved backup bundle to $TESTTMP/foo.hg (glob)<
355 356 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
356 357 saved backup bundle to $TESTTMP/foo.hg (glob)<
357 358 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
358 359 Accept this change? [n] ..
359 360 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
360 361
361 362 $ sed -e 's,(glob)$,&<,g' test-failure.t
362 363 $ echo babar
363 364 babar
364 365 This is a noop statement so that
365 366 this test is still more bytes than success.
366 367 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
367 368 saved backup bundle to $TESTTMP/foo.hg (glob)<
368 369 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
369 370 saved backup bundle to $TESTTMP/foo.hg (glob)<
370 371 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
371 372 saved backup bundle to $TESTTMP/*.hg (glob)<
372 373
373 374 (reinstall)
374 375 $ mv backup test-failure.t
375 376
376 377 No Diff
377 378 ===============
378 379
379 380 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
380 381 !.
381 382 Failed test-failure.t: output changed
382 383 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
383 384 python hash seed: * (glob)
384 385 [1]
385 386
386 387 test for --time
387 388 ==================
388 389
389 390 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time
390 391 .
391 392 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
392 393 # Producing time report
393 394 cuser csys real Test
394 395 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
395 396
396 397 test for --time with --job enabled
397 398 ====================================
398 399
399 400 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time --jobs 2
400 401 .
401 402 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
402 403 # Producing time report
403 404 cuser csys real Test
404 405 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
405 406
406 407 Skips
407 408 ================
408 409 $ cat > test-skip.t <<EOF
409 410 > $ echo xyzzy
410 411 > #require false
411 412 > EOF
412 413 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
413 414 !.s
414 415 Skipped test-skip.t: skipped
415 416 Failed test-failure.t: output changed
416 417 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
417 418 python hash seed: * (glob)
418 419 [1]
419 420
420 421 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy
421 422 .s
422 423 Skipped test-skip.t: skipped
423 424 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
424 425
425 426 Skips with xml
426 427 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy \
427 428 > --xunit=xunit.xml
428 429 .s
429 430 Skipped test-skip.t: skipped
430 431 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
431 432 $ cat xunit.xml
432 433 <?xml version="1.0" encoding="utf-8"?>
433 434 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
434 435 <testcase name="test-success.t" time="*"/> (glob)
435 436 </testsuite>
436 437
437 438 Missing skips or blacklisted skips don't count as executed:
438 439 $ echo test-failure.t > blacklist
439 440 $ $TESTDIR/run-tests.py --with-hg=`which hg` --blacklist=blacklist \
440 441 > test-failure.t test-bogus.t
441 442 ss
442 443 Skipped test-bogus.t: Doesn't exist
443 444 Skipped test-failure.t: blacklisted
444 445 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
445 446
446 447 #if json
447 448
448 449 test for --json
449 450 ==================
450 451
451 452 $ $TESTDIR/run-tests.py --with-hg=`which hg` --json
452 453
453 454 --- $TESTTMP/test-failure.t
454 455 +++ $TESTTMP/test-failure.t.err
455 456 @@ -1,4 +1,4 @@
456 457 $ echo babar
457 458 - rataxes
458 459 + babar
459 460 This is a noop statement so that
460 461 this test is still more bytes than success.
461 462
462 463 ERROR: test-failure.t output changed
463 464 !.s
464 465 Skipped test-skip.t: skipped
465 466 Failed test-failure.t: output changed
466 467 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
467 468 python hash seed: * (glob)
468 469 [1]
469 470
470 471 $ cat report.json
471 472 testreport ={
472 473 "test-failure.t": [\{] (re)
473 474 "csys": "\s*[\d\.]{4,5}", ? (re)
474 475 "cuser": "\s*[\d\.]{4,5}", ? (re)
475 476 "result": "failure", ? (re)
476 477 "time": "\s*[\d\.]{4,5}" (re)
477 478 }, ? (re)
478 479 "test-skip.t": {
479 480 "csys": "\s*[\d\.]{4,5}", ? (re)
480 481 "cuser": "\s*[\d\.]{4,5}", ? (re)
481 482 "result": "skip", ? (re)
482 483 "time": "\s*[\d\.]{4,5}" (re)
483 484 }, ? (re)
484 485 "test-success.t": [\{] (re)
485 486 "csys": "\s*[\d\.]{4,5}", ? (re)
486 487 "cuser": "\s*[\d\.]{4,5}", ? (re)
487 488 "result": "success", ? (re)
488 489 "time": "\s*[\d\.]{4,5}" (re)
489 490 }
490 491 } (no-eol)
491 492
492 493 #endif
General Comments 0
You need to be logged in to leave comments. Login now