##// END OF EJS Templates
run-tests: don't error when glob matched line ends with backslash...
Gregory Szorc -
r24811:a2dcf460 stable
parent child Browse files
Show More
@@ -1,2083 +1,2083 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 #
39 39 # (You could use any subset of the tests: test-s* happens to match
40 40 # enough that it's worth doing parallel runs, few enough that it
41 41 # completes fairly quickly, includes both shell and Python scripts, and
42 42 # includes some scripts that run daemon processes.)
43 43
44 44 from distutils import version
45 45 import difflib
46 46 import errno
47 47 import optparse
48 48 import os
49 49 import shutil
50 50 import subprocess
51 51 import signal
52 52 import sys
53 53 import tempfile
54 54 import time
55 55 import random
56 56 import re
57 57 import threading
58 58 import killdaemons as killmod
59 59 import Queue as queue
60 60 from xml.dom import minidom
61 61 import unittest
62 62
63 63 try:
64 64 import json
65 65 except ImportError:
66 66 try:
67 67 import simplejson as json
68 68 except ImportError:
69 69 json = None
70 70
# Lock serializing subprocess creation; Popen is not safe to call
# concurrently on all Python versions this script supports.
processlock = threading.Lock()

# subprocess._cleanup can race with any Popen.wait or Popen.poll on py24
# http://bugs.python.org/issue1731717 for details. We shouldn't be producing
# zombies but it's pretty harmless even if we do.
if sys.version_info < (2, 5):
    subprocess._cleanup = lambda: None

# os.WIFEXITED does not exist on some platforms (e.g. Windows); fall back
# to a function that always reports "did not exit normally".
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Closing inherited file descriptors in child processes is only done on
# POSIX platforms.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn cmd through the shell in directory wd, emulating popen2.Popen4.

    stderr is merged into stdout. If timeout is non-zero, a watchdog
    thread flags p.timeout = True and terminates the process once the
    deadline passes.
    """
    processlock.acquire()
    p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    processlock.release()

    # Aliases matching the legacy popen2.Popen4 interface used by callers.
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline passes or the process finishes.
            # NOTE(review): p.returncode only changes when someone calls
            # p.poll()/p.wait(); presumably the caller waits concurrently.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
# Path to the running Python interpreter, with forward slashes so it can be
# embedded in shell command lines on Windows.
PYTHON = sys.executable.replace('\\', '/')
# Environment variable used to point a temporary hg install at its modules.
IMPL_PATH = 'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = 'JYTHONPATH'

# Option defaults as (environment variable, fallback value) pairs;
# getparser() folds the environment overrides into this table in place.
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
118 118
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a dict.

    Returns a mapping of entry -> name of the list file that declared it.
    '#' starts a comment and blank lines are ignored. Missing files are
    skipped (with a warning unless warn is False); other IOErrors
    propagate.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print "warning: no such %s file: %s" % (listtype, filename)
            continue

        for line in f.readlines():
            # Strip trailing comments and surrounding whitespace.
            line = line.split('#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries
139 139
def getparser():
    """Obtain the OptionParser used by the CLI.

    Also resolves the entries in the module-level 'defaults' table against
    the environment and installs them as parser defaults.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')

    # Resolve environment overrides, coercing each to the type of its
    # fallback value. Note: this mutates the module-level 'defaults' dict
    # in place.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
226 226
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns (options, args). Calls parser.error() (which exits) on
    invalid combinations, and may set the module-level 'verbose' flag.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.expanduser(options.with_hg)
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == 'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local resolves to the hg script next to this test directory.
        testdir = os.path.dirname(os.path.realpath(sys.argv[0]))
        hgbin = os.path.join(os.path.dirname(testdir), 'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # log() prints this value as a prefix; the empty string simply
        # switches verbose output on.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        options.timeout = 0
    if options.py3k_warnings:
        if sys.version_info[:2] < (2, 6) or sys.version_info[:2] >= (3, 0):
            parser.error('--py3k-warnings can only be used on Python 2.6+')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    return (options, args)
296 296
def rename(src, dst):
    """Move src to dst by copy-then-delete.

    Unlike os.rename(), this supports an already-existing destination
    (and plays nicely with open files), at the cost of atomicity.
    """
    # Copy first so dst is fully written before src disappears.
    shutil.copy(src, dst)
    os.unlink(src)
303 303
def getdiff(expected, output, ref, err):
    """Return (servefail, lines) for a unified diff of test output.

    servefail becomes True when the diff contains the daemon
    startup-failure marker; lines is the normalized diff itself.
    """
    servefail = False
    lines = []
    for rawline in difflib.unified_diff(expected, output, ref, err):
        line = rawline
        # Normalize path separators in the '---'/'+++' header lines.
        if line.startswith(('+++', '---')):
            line = line.replace('\\', '/')
        # Drop a single trailing space before the newline.
        if line.endswith(' \n'):
            line = line[:-2] + '\n'
        lines.append(line)
        if (not servefail and
                line.startswith('+ abort: child process failed to start')):
            servefail = True

    return servefail, lines
318 318
# Global verbosity flag; parseargs() rebinds it to a string prefix ('')
# when --verbose is given.
verbose = False
def vlog(*msg):
    """Forward to log() only when verbose mode is enabled."""
    if verbose is not False:
        return log(*msg)

    return None
326 326
# Control characters that break XML even inside a CDATA block:
# 0-31 except \t, \n and \r.
CDATA_EVIL = re.compile(r"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Return data transformed so it is safe inside a CDATA block.

    Illegal control characters become '?', and every ']]>' sequence is
    split as '] ]>' so it cannot terminate the CDATA section early.
    """
    sanitized = CDATA_EVIL.sub('?', data)
    return sanitized.replace(']]>', '] ]>')
340 340
def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    # Serialize output so parallel test threads don't interleave lines.
    # NOTE(review): iolock appears to be a module-level lock defined
    # elsewhere in this file — confirm.
    iolock.acquire()
    if verbose:
        # 'verbose' may carry a prefix string to print before each line.
        print verbose,
    for m in msg:
        print m,
    print
    sys.stdout.flush()
    iolock.release()
354 354
def terminate(proc):
    """Terminate subprocess (with fallback for Python versions < 2.6)"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        # Popen.terminate() appeared in Python 2.6; otherwise send
        # SIGTERM by hand.
        kill = getattr(proc, 'terminate',
                       lambda : os.kill(proc.pid, signal.SIGTERM))
        kill()
    except OSError:
        # The process may already be gone; that is fine.
        pass
362 362
def killdaemons(pidfile):
    """Best-effort kill of the daemons whose pids are listed in pidfile."""
    return killmod.killdaemons(pidfile, logfn=vlog, remove=True,
                               tryhard=False)
366 366
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """

        self.path = path
        self.name = os.path.basename(path)
        self._testdir = os.path.dirname(path)
        self.errpath = os.path.join(self._testdir, '%s.err' % self.name)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = shell

        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        # NOTE: self.refpath is a property supplied by subclasses
        # (e.g. PythonTest, TTest).
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError, e:
            # Several tests may share a thread tmp dir; existing is fine.
            if e.errno != errno.EEXIST:
                raise

        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError, e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest, e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest, e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest, e:
                result.addWarn(self, str(e))
            except self.failureException, e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            # Sentinel set by _runcommand when the watchdog fires.
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if not self._keeptmpdir:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        # Persist the .err file for failing, non-skipped runs so diffs can
        # be inspected later.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            (r':%s\b' % self._startport, ':$HGPORT'),
            (r':%s\b' % (self._startport + 1), ':$HGPORT1'),
            (r':%s\b' % (self._startport + 2), ':$HGPORT2'),
            (r'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             r'\1 (glob)'),
            ]

        if os.name == 'nt':
            # Build a case-insensitive, slash-agnostic regex matching the
            # literal test tmp path so Windows paths normalize to $TESTTMP.
            r.append(
                (''.join(c.isalpha() and '[%s%s]' % (c.lower(), c.upper()) or
                    c in '/\\' and r'[/\\]' or c.isdigit() and c or '\\' + c
                    for c in self._testtmp), '$TESTTMP'))
        else:
            r.append((re.escape(self._testtmp), '$TESTTMP'))

        return r

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        env["HGPORT"] = str(self._startport)
        env["HGPORT1"] = str(self._startport + 1)
        env["HGPORT2"] = str(self._startport + 2)
        env["HGRCPATH"] = os.path.join(self._threadtmp, '.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, 'daemon.pids')
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        hgrc = open(path, 'wb')
        hgrc.write('[ui]\n')
        hgrc.write('slash = True\n')
        hgrc.write('interactive = False\n')
        hgrc.write('mergemarkers = detailed\n')
        hgrc.write('promptecho = True\n')
        hgrc.write('[defaults]\n')
        hgrc.write('backout = -d "0 0"\n')
        hgrc.write('commit = -d "0 0"\n')
        hgrc.write('shelve = --date "0 0"\n')
        hgrc.write('tag = -d "0 0"\n')
        hgrc.write('[devel]\n')
        hgrc.write('all = true\n')
        hgrc.write('[largefiles]\n')
        hgrc.write('usercache = %s\n' %
                   (os.path.join(self._testtmp, '.cache/largefiles')))

        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write('[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: let the test write straight to the console.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                # Report the signal-style status instead of "success"
                # when we had to terminate the process ourselves.
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            # The Popen4 watchdog fired; signal it with a sentinel value.
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
774 774
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test in a '<name>.out' file.
        return os.path.join(self._testdir, '%s.out' % self.name)

    def _run(self, env):
        # Pass -3 to the interpreter when Py3k warnings were requested.
        if self._py3kwarnings:
            py3kswitch = ' -3'
        else:
            py3kswitch = ''
        cmd = '%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows emits \r\n line endings which must be normalized before
        # comparison against the reference output.
        result = self._runcommand(cmd, env,
                                  normalizenewlines=(os.name == 'nt'))
        if self._aborted:
            raise KeyboardInterrupt()

        return result
793 793
# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
# NOTE(review): patterns appear to be matched against expected-output lines;
# confirm against the (non-visible) glob-handling code further down.
checkcodeglobpats = [
    re.compile(r'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(r'^moving \S+/.*[^)]$'),
    re.compile(r'^pulling from \$TESTTMP/.*[^)]$')
]
802 802
803 803 class TTest(Test):
804 804 """A "t test" is a test backed by a .t file."""
805 805
806 806 SKIPPED_PREFIX = 'skipped: '
807 807 FAILED_PREFIX = 'hghave check failed: '
808 808 NEEDESCAPE = re.compile(r'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
809 809
810 810 ESCAPESUB = re.compile(r'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
811 811 ESCAPEMAP = dict((chr(i), r'\x%02x' % i) for i in range(256))
812 812 ESCAPEMAP.update({'\\': '\\\\', '\r': r'\r'})
813 813
    @property
    def refpath(self):
        # A .t test is its own reference: the expected output is embedded
        # inline in the test file.
        return os.path.join(self._testdir, self.name)
817 817
    def _run(self, env):
        """Translate the .t file to a shell script, execute it and merge
        the captured output back against the expected output."""
        f = open(self.path, 'rb')
        lines = f.readlines()
        f.close()

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = '%s.sh' % self._testtmp
        f = open(fname, 'wb')
        for l in script:
            f.write(l)
        f.close()

        cmd = '%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd)

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
846 846
    def _hghave(self, reqs):
        """Return True when all features in reqs are available per hghave.

        Aborts the entire runner (sys.exit) if hghave itself errors out
        (exit status 2).
        """
        # TODO do something smarter when all other uses of hghave are gone.
        tdir = self._testdir.replace('\\', '/')
        proc = Popen4('%s -c "%s/hghave %s"' %
                      (self._shell, tdir, ' '.join(reqs)),
                      self._testtmp, 0, self._getenv())
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself failed (not merely a missing feature).
            print stdout
            sys.exit(1)

        return ret == 0
862 862
    def _parsetest(self, lines):
        """Translate a .t test's ``lines`` into an executable shell script.

        Returns ``(salt, script, after, expected)`` where ``script`` is the
        list of shell lines to execute, ``after`` maps source line numbers
        to the non-executed text to re-merge into the output, ``expected``
        maps source line numbers to the expected output lines, and ``salt``
        is the unique marker used to synchronize output with source lines.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = "SALT" + str(time.time())
        def addsalt(line, inpython):
            # Emit a marker for source line ``line``; inside a Python block
            # the exit code is not meaningful, so hard-code 0.
            if inpython:
                script.append('%s %d 0\n' % (salt, line))
            else:
                script.append('echo %s %s $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        # pos is the source line of the command currently being collected;
        # prepos is the previous command's line (used for continuations).
        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append('set -x\n')
        if os.getenv('MSYSTEM'):
            # MSYS's pwd prints POSIX-style paths; -W asks for Windows style.
            script.append('alias pwd="pwd -W"\n')

        for n, l in enumerate(lines):
            if not l.endswith('\n'):
                l += '\n'
            if l.startswith('#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != '#require':
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                if not self._hghave(lsplit[1:]):
                    # Requirement missing: replace the whole script with a
                    # skip (exit code 80 is the harness's skip status).
                    script = ["exit 80\n"]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith('#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != '#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._hghave(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith('#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith('#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                # Inside a false conditional: keep the text, execute nothing.
                after.setdefault(pos, []).append(l)
            elif l.startswith('  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append('%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith('  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith('  $ '): # commands
                if inpython:
                    script.append('EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == 'cd':
                    # Make a failed cd abort the script instead of running
                    # subsequent commands in the wrong directory.
                    l = '  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith('  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith('  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append('EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            # Close a Python heredoc left open at end of file.
            script.append('EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected
976 976
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Walks ``output`` splitting on the ``salt`` markers emitted by
        _parsetest, compares each actual line against ``expected`` and
        re-interleaves the inert source text from ``after``. Returns
        ``(exitcode, postout)``; exitcode becomes False when the only
        differences were warn-only (glob annotation) mismatches.
        """
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            # A failing exit code is never downgraded to a warning.
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Everything before the salt is real output; everything
                # after is "<source line> <exit code>".
                lout, lcmd = l.split(salt, 1)

            if lout:
                if not lout.endswith('\n'):
                    lout += ' (no-eol)\n'

                # Find the expected output at the current position.
                el = None
                if expected.get(pos, None):
                    el = expected[pos].pop(0)

                r = TTest.linematch(el, lout)
                if isinstance(r, str):
                    # '+glob'/'-glob' mean the only difference is a missing
                    # or superfluous '(glob)' annotation: warn, don't fail.
                    if r == '+glob':
                        lout = el[:-1] + ' (glob)\n'
                        r = '' # Warn only this line.
                    elif r == '-glob':
                        lout = ''.join(el.rsplit(' (glob)', 1))
                        r = '' # Warn only this line.
                    else:
                        log('\ninfo, unknown linematch result: %r\n' % r)
                        r = False
                if r:
                    postout.append('  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        # Unprintable bytes: store an escaped form marked
                        # with '(esc)' so the .err file stays readable.
                        lout = TTest._stringescape('%s (esc)\n' %
                                                   lout.rstrip('\n'))
                    postout.append('  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append('  [%s]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            # Trailing inert text after the last command.
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1039 1039
1040 1040 @staticmethod
1041 1041 def rematch(el, l):
1042 1042 try:
1043 1043 # use \Z to ensure that the regex matches to the end of the string
1044 1044 if os.name == 'nt':
1045 1045 return re.match(el + r'\r?\n\Z', l)
1046 1046 return re.match(el + r'\n\Z', l)
1047 1047 except re.error:
1048 1048 # el is an invalid regex
1049 1049 return False
1050 1050
1051 1051 @staticmethod
1052 1052 def globmatch(el, l):
1053 1053 # The only supported special characters are * and ? plus / which also
1054 1054 # matches \ on windows. Escaping of these characters is supported.
1055 1055 if el + '\n' == l:
1056 1056 if os.altsep:
1057 1057 # matching on "/" is not needed for this line
1058 1058 for pat in checkcodeglobpats:
1059 1059 if pat.match(el):
1060 1060 return True
1061 1061 return '-glob'
1062 1062 return True
1063 1063 i, n = 0, len(el)
1064 1064 res = ''
1065 1065 while i < n:
1066 1066 c = el[i]
1067 1067 i += 1
1068 if c == '\\' and el[i] in '*?\\/':
1068 if c == '\\' and i < n and el[i] in '*?\\/':
1069 1069 res += el[i - 1:i + 1]
1070 1070 i += 1
1071 1071 elif c == '*':
1072 1072 res += '.*'
1073 1073 elif c == '?':
1074 1074 res += '.'
1075 1075 elif c == '/' and os.altsep:
1076 1076 res += '[/\\\\]'
1077 1077 else:
1078 1078 res += re.escape(c)
1079 1079 return TTest.rematch(res, l)
1080 1080
    @staticmethod
    def linematch(el, l):
        """Compare expected line ``el`` against actual output line ``l``.

        Returns True on a match, False on a mismatch, or the strings
        '+glob'/'-glob' when the only difference is a missing or
        superfluous '(glob)' annotation.
        """
        if el == l: # perfect match (fast)
            return True
        if el:
            if el.endswith(" (esc)\n"):
                # Expected line stores escaped bytes; decode before
                # comparing (string-escape is a Python 2 codec).
                el = el[:-7].decode('string-escape') + '\n'
            if el == l or os.name == 'nt' and el[:-1] + '\r\n' == l:
                return True
            if el.endswith(" (re)\n"):
                return TTest.rematch(el[:-6], l)
            if el.endswith(" (glob)\n"):
                # ignore '(glob)' added to l by 'replacements'
                if l.endswith(" (glob)\n"):
                    l = l[:-8] + "\n"
                return TTest.globmatch(el[:-8], l)
            if os.altsep and l.replace('\\', '/') == el:
                # Would have matched with a '(glob)' annotation; tell the
                # caller so it can warn instead of failing.
                return '+glob'
        return False
1100 1100
1101 1101 @staticmethod
1102 1102 def parsehghaveoutput(lines):
1103 1103 '''Parse hghave log lines.
1104 1104
1105 1105 Return tuple of lists (missing, failed):
1106 1106 * the missing/unknown features
1107 1107 * the features for which existence check failed'''
1108 1108 missing = []
1109 1109 failed = []
1110 1110 for line in lines:
1111 1111 if line.startswith(TTest.SKIPPED_PREFIX):
1112 1112 line = line.splitlines()[0]
1113 1113 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1114 1114 elif line.startswith(TTest.FAILED_PREFIX):
1115 1115 line = line.splitlines()[0]
1116 1116 failed.append(line[len(TTest.FAILED_PREFIX):])
1117 1117
1118 1118 return missing, failed
1119 1119
    @staticmethod
    def _escapef(m):
        # re.sub callback: map a matched special character to its escape
        # sequence via the ESCAPEMAP table.
        return TTest.ESCAPEMAP[m.group(0)]
1123 1123
    @staticmethod
    def _stringescape(s):
        # Escape every character that ESCAPESUB matches, using _escapef as
        # the substitution callback.
        return TTest.ESCAPESUB(TTest._escapef, s)
1127 1127
1128 1128 iolock = threading.RLock()
1129 1129
class SkipTest(Exception):
    """Raised to indicate that a test is to be skipped (recorded in
    TestResult.skipped rather than as a failure)."""
1132 1132
class IgnoreTest(Exception):
    """Raised to indicate that a test is to be ignored (recorded in
    TestResult.ignored rather than as a failure)."""
1135 1135
class WarnTest(Exception):
    """Raised to indicate that a test warned (recorded in
    TestResult.warned rather than as a failure)."""
1138 1138
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest.

    All console writes are serialized through the module-level ``iolock``;
    it is taken as a context manager so the lock is released even if a
    stream write raises (bare acquire()/release() pairs would leak it).
    """
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        self.times = []
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if not self._options.nodiff:
                    self.stream.write('\nERROR: %s output changed\n' % test)

                self.stream.write('!')
                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason != 'not retesting' and reason != "doesn't match keyword":
                    self.stream.write('i')
                else:
                    # Filtered-out tests shouldn't shrink the reported
                    # test count, so count them as run.
                    self.testsRun += 1
                self.stream.flush()

    def addWarn(self, test, reason):
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        with iolock:
            if self.showAll:
                self.stream.writeln('warned %s' % reason)
            else:
                self.stream.write('~')
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        failed = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                os.system("%s %s %s" %
                          (self._options.view, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    # iolock is an RLock, so the nested acquire inside
                    # addFailure is safe on this thread.
                    self.addFailure(
                        test,
                        'server failed to start (HGPORT=%s)' % test._startport)
                else:
                    self.stream.write('\n')
                    for line in lines:
                        self.stream.write(line)
                    self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                self.stream.write('Accept this change? [n] ')
                answer = sys.stdin.readline().strip()
                if answer.lower() in ('y', 'yes'):
                    if test.name.endswith('.t'):
                        rename(test.errpath, test.path)
                    else:
                        rename(test.errpath, '%s.out' % test.path)
                    accepted = True
            if not accepted and not failed:
                self.faildata[test.name] = ''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        # (name, cumulative child user time, child system time, real time)
        self.times.append((test.name, endtime[2] - starttime[2],
                    endtime[3] - starttime[3], endtime[4] - starttime[4]))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1301 1301
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest

    def run(self, result):
        """Filter, schedule and run the suite's tests on worker threads."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:
            def get():
                # Return the test, reloading a fresh instance when the
                # test object asks for it (needed for repeated runs).
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test.name, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.name in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Match keywords against both the test's content and
                    # its name, case-insensitively.
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.name.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        def job(test, result):
            # Worker thread body: run one test and report completion on
            # the done queue; re-raise unexpected errors after reporting.
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise

        stoppedearly = False

        try:
            while tests or running:
                # Block for a completion when all job slots are busy or no
                # tests remain; poll with a 1s timeout so Ctrl-C works.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # In loop mode, requeue the test (fresh instance if
                        # it wants reloading) so the suite never drains.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test.name, num_tests[0]))
                        else:
                            tests.append(test)
                    t = threading.Thread(target=job, name=test.name,
                                         args=(test, result))
                    t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        return result
1443 1443
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; provides options and post-run hooks.
        self._runner = runner

    def run(self, test):
        """Run ``test``, print the summary and write optional reports.

        Emits the per-category summaries to the stream, then the xunit XML
        and/or JSON reports when requested via options, and finally the
        totals line. Returns the populated TestResult.
        """
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        # Hold iolock for the whole report so worker output can't interleave.
        iolock.acquire()
        self.stream.writeln('')

        if not self._runner.options.noskips:
            for test, msg in result.skipped:
                self.stream.writeln('Skipped %s: %s' % (test.name, msg))
        for test, msg in result.warned:
            self.stream.writeln('Warned %s: %s' % (test.name, msg))
        for test, msg in result.failures:
            self.stream.writeln('Failed %s: %s' % (test.name, msg))
        for test, msg in result.errors:
            self.stream.writeln('Errored %s: %s' % (test.name, msg))

        if self._runner.options.xunit:
            # Write an xunit-compatible XML report.
            xuf = open(self._runner.options.xunit, 'wb')
            try:
                timesd = dict(
                    (test, real) for test, cuser, csys, real in result.times)
                doc = minidom.Document()
                s = doc.createElement('testsuite')
                s.setAttribute('name', 'run-tests')
                s.setAttribute('tests', str(result.testsRun))
                s.setAttribute('errors', "0") # TODO
                s.setAttribute('failures', str(failed))
                s.setAttribute('skipped', str(skipped + ignored))
                doc.appendChild(s)
                for tc in result.successes:
                    t = doc.createElement('testcase')
                    t.setAttribute('name', tc.name)
                    t.setAttribute('time', '%.3f' % timesd[tc.name])
                    s.appendChild(t)
                for tc, err in sorted(result.faildata.iteritems()):
                    t = doc.createElement('testcase')
                    t.setAttribute('name', tc)
                    t.setAttribute('time', '%.3f' % timesd[tc])
                    # createCDATASection expects a unicode or it will convert
                    # using default conversion rules, which will fail if
                    # string isn't ASCII.
                    err = cdatasafe(err).decode('utf-8', 'replace')
                    cd = doc.createCDATASection(err)
                    t.appendChild(cd)
                    s.appendChild(t)
                xuf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))
            finally:
                xuf.close()

        if self._runner.options.json:
            if json is None:
                raise ImportError("json module not installed")
            jsonpath = os.path.join(self._runner._testdir, 'report.json')
            fp = open(jsonpath, 'w')
            try:
                timesd = {}
                for test, cuser, csys, real in result.times:
                    timesd[test] = (real, cuser, csys)

                outcome = {}
                for tc in result.successes:
                    testresult = {'result': 'success',
                                  'time': ('%0.3f' % timesd[tc.name][0]),
                                  'cuser': ('%0.3f' % timesd[tc.name][1]),
                                  'csys': ('%0.3f' % timesd[tc.name][2])}
                    outcome[tc.name] = testresult

                for tc, err in sorted(result.faildata.iteritems()):
                    testresult = {'result': 'failure',
                                  'time': ('%0.3f' % timesd[tc][0]),
                                  'cuser': ('%0.3f' % timesd[tc][1]),
                                  'csys': ('%0.3f' % timesd[tc][2])}
                    outcome[tc] = testresult

                for tc, reason in result.skipped:
                    testresult = {'result': 'skip',
                                  'time': ('%0.3f' % timesd[tc.name][0]),
                                  'cuser': ('%0.3f' % timesd[tc.name][1]),
                                  'csys': ('%0.3f' % timesd[tc.name][2])}
                    outcome[tc.name] = testresult

                jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                # The report is a JS assignment, not plain JSON, so it can
                # be loaded directly by a browser.
                fp.writelines(("testreport =", jsonout))
            finally:
                fp.close()

        self._runner._checkhglib('Tested')

        self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
            % (result.testsRun,
               skipped + ignored, warned, failed))
        if failed:
            # The seed matters for reproducing hash-order-dependent failures.
            self.stream.writeln('python hash seed: %s' %
                os.environ['PYTHONHASHSEED'])
        if self._runner.options.time:
            self.printtimes(result.times)

        iolock.release()

        return result

    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        # Sort by real elapsed time, slowest last.
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f   %s'
        self.stream.writeln('%-7s %-7s %-7s   %s' % ('cuser', 'csys', 'real',
                    'Test'))
        for test, cuser, csys, real in times:
            self.stream.writeln(cols % (cuser, csys, real, test))
1570 1570
1571 1571 class TestRunner(object):
1572 1572 """Holds context for executing tests.
1573 1573
1574 1574 Tests rely on a lot of state. This object holds it for them.
1575 1575 """
1576 1576
    # Programs required to run tests; the run aborts early if any is
    # missing from PATH.
    REQUIREDTOOLS = [
        os.path.basename(sys.executable),
        'diff',
        'grep',
        'unzip',
        'gunzip',
        'bunzip2',
        'sed',
    ]

    # Maps file extensions to test class; consulted by _gettest().
    TESTTYPES = [
        ('.py', PythonTest),
        ('.t', TTest),
    ]
1593 1593
1594 1594 def __init__(self):
1595 1595 self.options = None
1596 1596 self._hgroot = None
1597 1597 self._testdir = None
1598 1598 self._hgtmp = None
1599 1599 self._installdir = None
1600 1600 self._bindir = None
1601 1601 self._tmpbinddir = None
1602 1602 self._pythondir = None
1603 1603 self._coveragefile = None
1604 1604 self._createdfiles = []
1605 1605 self._hgpath = None
1606 1606
    def run(self, args, parser=None):
        """Run the test suite.

        Parses ``args``, verifies required tools are present, discovers
        tests and executes them. The process umask is forced to 022 for
        the duration of the run and restored afterwards.
        """
        oldmask = os.umask(022)
        try:
            parser = parser or getparser()
            options, args = parseargs(args, parser)
            self.options = options

            self._checktools()
            tests = self.findtests(args)
            return self._run(tests)
        finally:
            os.umask(oldmask)
1620 1620
    def _run(self, tests):
        """Order ``tests``, set up the environment, and execute them.

        Builds the temporary directories, install/bin/python paths, PATH
        and PYTHONPATH the child processes need, then delegates to
        _runtests(). Returns the harness exit code.
        """
        if self.options.random:
            random.shuffle(tests)
        else:
            # keywords for slow tests
            slow = 'svn gendoc check-code-hg'.split()
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                try:
                    val = -os.stat(f).st_size
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    return -1e9 # file does not exist, tell early
                for kw in slow:
                    if kw in f:
                        val *= 10
                return val
            tests.sort(key=sortkey)

        self._testdir = os.environ['TESTDIR'] = os.getcwd()

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = self.options.tmpdir
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print "error: temp dir %r already exists" % tmpdir
                return 1

                # Automatically removing tmpdir sounds convenient, but could
                # really annoy anyone in the habit of using "--tmpdir=/tmp"
                # or "--tmpdir=$HOME".
                #vlog("# Removing temp dir", tmpdir)
                #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = os.getenv('TMP')
            tmpdir = tempfile.mkdtemp('', 'hgtests.', d)
        self._hgtmp = os.environ['HGTMP'] = os.path.realpath(tmpdir)

        if self.options.with_hg:
            self._installdir = None
            self._bindir = os.path.dirname(os.path.realpath(
                           self.options.with_hg))
            self._tmpbindir = os.path.join(self._hgtmp, 'install', 'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, "install")
            self._bindir = os.environ["BINDIR"] = \
                os.path.join(self._installdir, "bin")
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, "lib", "python")

        os.environ["BINDIR"] = self._bindir
        os.environ["PYTHON"] = PYTHON

        runtestdir = os.path.abspath(os.path.dirname(__file__))
        path = [self._bindir, runtestdir] + os.environ["PATH"].split(os.pathsep)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(__file__)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        os.environ["PATH"] = os.pathsep.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = os.environ.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        os.environ[IMPL_PATH] = os.pathsep.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"

        self._coveragefile = os.path.join(self._testdir, '.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, os.environ[IMPL_PATH])

        try:
            return self._runtests(tests) or 0
        finally:
            # Brief pause lets child processes finish releasing resources
            # before temp dirs are removed.
            time.sleep(.1)
            self._cleanup()
1736 1736
1737 1737 def findtests(self, args):
1738 1738 """Finds possible test files from arguments.
1739 1739
1740 1740 If you wish to inject custom tests into the test harness, this would
1741 1741 be a good function to monkeypatch or override in a derived class.
1742 1742 """
1743 1743 if not args:
1744 1744 if self.options.changed:
1745 1745 proc = Popen4('hg st --rev "%s" -man0 .' %
1746 1746 self.options.changed, None, 0)
1747 1747 stdout, stderr = proc.communicate()
1748 1748 args = stdout.strip('\0').split('\0')
1749 1749 else:
1750 1750 args = os.listdir('.')
1751 1751
1752 1752 return [t for t in args
1753 1753 if os.path.basename(t).startswith('test-')
1754 1754 and (t.endswith('.py') or t.endswith('.t'))]
1755 1755
    def _runtests(self, tests):
        """Install hg if needed, build the suite and run it.

        Returns 1 when any test failed, 80 when the run only produced
        warnings, and None (treated as success by _run) otherwise.
        """
        try:
            if self._installdir:
                self._installhg()
                self._checkhglib("Testing")
            else:
                self._usecorrectpython()

            if self.options.restart:
                # Drop tests that already succeeded in the previous run:
                # resume from the first one with a leftover .err file.
                orig = list(tests)
                while tests:
                    if os.path.exists(tests[0] + ".err"):
                        break
                    tests.pop(0)
                if not tests:
                    print "running all tests"
                    tests = orig

            tests = [self._gettest(t, i) for i, t in enumerate(tests)]

            failed = False
            warned = False

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=self.options.keywords,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              tests=tests, loadtest=self._gettest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)
            result = runner.run(suite)

            if result.failures:
                failed = True
            if result.warned:
                warned = True

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print "\ninterrupted!"

        if failed:
            return 1
        if warned:
            return 80
1809 1809
def _gettest(self, test, count):
    """Obtain a Test by looking at its filename.

    Returns a Test instance. The Test may not be runnable if it doesn't
    map to a known type.
    """
    # Pick the Test subclass from the filename extension; fall back to
    # the base Test class when no registered extension matches.
    testcls = Test
    lowered = test.lower()
    for ext, cls in self.TESTTYPES:
        if lowered.endswith(ext):
            testcls = cls
            break

    refpath = os.path.join(self._testdir, test)
    tmpdir = os.path.join(self._hgtmp, 'child%d' % count)

    # Each test gets a block of 3 consecutive ports starting at --port.
    t = testcls(refpath, tmpdir,
                keeptmpdir=self.options.keep_tmpdir,
                debug=self.options.debug,
                timeout=self.options.timeout,
                startport=self.options.port + count * 3,
                extraconfigopts=self.options.extra_config_opt,
                py3kwarnings=self.options.py3k_warnings,
                shell=self.options.shell)
    t.should_reload = True
    return t
1837 1837
def _cleanup(self):
    """Clean up state from this test invocation."""
    if self.options.keep_tmpdir:
        # --keep-tmpdir: leave everything behind for inspection.
        return

    vlog("# Cleaning up HGTMP", self._hgtmp)
    shutil.rmtree(self._hgtmp, ignore_errors=True)
    for created in self._createdfiles:
        try:
            os.remove(created)
        except OSError:
            # The file may already be gone; best-effort removal.
            pass
1851 1851
def _usecorrectpython(self):
    """Configure the environment to use the appropriate Python in tests."""
    # Tests must use the same interpreter as us or bad things will happen.
    pyexename = 'python.exe' if sys.platform == 'win32' else 'python'
    if getattr(os, 'symlink', None):
        vlog("# Making python executable in test path a symlink to '%s'" %
             sys.executable)
        linkpath = os.path.join(self._tmpbindir, pyexename)
        try:
            # Nothing to do if the symlink is already correct.
            if os.readlink(linkpath) == sys.executable:
                return
            os.unlink(linkpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        if self._findprogram(pyexename) != sys.executable:
            try:
                os.symlink(sys.executable, linkpath)
                self._createdfiles.append(linkpath)
            except OSError as err:
                # child processes may race, which is harmless
                if err.errno != errno.EEXIST:
                    raise
    else:
        # No symlink support: rearrange PATH so our interpreter's
        # directory is searched first.
        exedir, exename = os.path.split(sys.executable)
        vlog("# Modifying search path to find %s as %s in '%s'" %
             (exename, pyexename, exedir))
        searchpath = os.environ['PATH'].split(os.pathsep)
        while exedir in searchpath:
            searchpath.remove(exedir)
        os.environ['PATH'] = os.pathsep.join([exedir] + searchpath)
        if not self._findprogram(pyexename):
            print("WARNING: Cannot find %s in search path" % pyexename)
1885 1885
def _installhg(self):
    """Install hg into the test environment.

    This will also configure hg with the appropriate testing settings.
    """
    vlog("# Performing temporary installation of HG")
    installerrs = os.path.join("tests", "install.err")
    compiler = ''
    if self.options.compiler:
        compiler = '--compiler ' + self.options.compiler
    pure = "--pure" if self.options.pure else ""
    py3 = '--c2to3' if sys.version_info[0] == 3 else ''

    # Run installer in hg root
    script = os.path.realpath(sys.argv[0])
    hgroot = os.path.dirname(os.path.dirname(script))
    self._hgroot = hgroot
    os.chdir(hgroot)
    nohome = '--home=""'
    if os.name == 'nt':
        # The --home="" trick works only on OS where os.sep == '/'
        # because of a distutils convert_path() fast-path. Avoid it at
        # least on Windows for now, deal with .pydistutils.cfg bugs
        # when they happen.
        nohome = ''
    cmd = ('%(exe)s setup.py %(py3)s %(pure)s clean --all'
           ' build %(compiler)s --build-base="%(base)s"'
           ' install --force --prefix="%(prefix)s"'
           ' --install-lib="%(libdir)s"'
           ' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
           % {'exe': sys.executable, 'py3': py3, 'pure': pure,
              'compiler': compiler,
              'base': os.path.join(self._hgtmp, "build"),
              'prefix': self._installdir, 'libdir': self._pythondir,
              'bindir': self._bindir,
              'nohome': nohome, 'logfile': installerrs})

    # setuptools requires install directories to exist.
    def makedirs(p):
        try:
            os.makedirs(p)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    makedirs(self._pythondir)
    makedirs(self._bindir)

    vlog("# Running", cmd)
    if os.system(cmd) == 0:
        if not self.options.verbose:
            os.remove(installerrs)
    else:
        # Installation failed: replay the captured log, then bail out.
        with open(installerrs, 'rb') as f:
            for line in f:
                sys.stdout.write(line)
        sys.exit(1)
    os.chdir(self._testdir)

    self._usecorrectpython()

    if self.options.py3k_warnings and not self.options.anycoverage:
        vlog("# Updating hg command to enable Py3k Warnings switch")
        hgscript = os.path.join(self._bindir, 'hg')
        with open(hgscript, 'rb') as f:
            lines = [line.rstrip() for line in f]
        # Append -3 to the shebang line so Python emits py3k warnings.
        lines[0] += ' -3'
        with open(hgscript, 'wb') as f:
            for line in lines:
                f.write(line + '\n')

    hgbat = os.path.join(self._bindir, 'hg.bat')
    if os.path.isfile(hgbat):
        # hg.bat expects to be put in bin/scripts while run-tests.py
        # installation layout put it in bin/ directly. Fix it
        with open(hgbat, 'rb') as f:
            data = f.read()
        if '"%~dp0..\python" "%~dp0hg" %*' in data:
            data = data.replace('"%~dp0..\python" "%~dp0hg" %*',
                                '"%~dp0python" "%~dp0hg" %*')
            with open(hgbat, 'wb') as f:
                f.write(data)
        else:
            print('WARNING: cannot fix hg.bat reference to python.exe')

    if self.options.anycoverage:
        custom = os.path.join(self._testdir, 'sitecustomize.py')
        target = os.path.join(self._pythondir, 'sitecustomize.py')
        vlog('# Installing coverage trigger to %s' % target)
        shutil.copyfile(custom, target)
        rc = os.path.join(self._testdir, '.coveragerc')
        vlog('# Installing coverage rc to %s' % rc)
        os.environ['COVERAGE_PROCESS_START'] = rc
        covdir = os.path.join(self._installdir, '..', 'coverage')
        try:
            os.mkdir(covdir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        os.environ['COVERAGE_DIR'] = covdir
1995 1995
def _checkhglib(self, verb):
    """Ensure that the 'mercurial' package imported by python is
    the one we expect it to be. If not, print a warning to stderr."""
    if self._bindir == self._pythondir and self._bindir != self._tmpbindir:
        # The pythondir has been inferred from --with-hg flag.
        # We cannot expect anything sensible here.
        return
    expected = os.path.join(self._pythondir, 'mercurial')
    actual = self._gethgpath()
    if os.path.abspath(actual) != os.path.abspath(expected):
        sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
                         '         (expected %s)\n'
                         % (verb, actual, expected))
def _gethgpath(self):
    """Return the path to the mercurial package that is actually found by
    the current Python interpreter."""
    # Return the cached value when we already probed once.
    if self._hgpath is not None:
        return self._hgpath

    # Ask the test interpreter itself where it imports mercurial from.
    cmd = '%s -c "import mercurial; print (mercurial.__path__[0])"'
    pipe = os.popen(cmd % PYTHON)
    try:
        self._hgpath = pipe.read().strip()
    finally:
        pipe.close()

    return self._hgpath
2024 2024
def _outputcoverage(self):
    """Produce code coverage output."""
    # NOTE: third-party dependency, only needed when coverage is requested.
    from coverage import coverage

    vlog('# Producing coverage report')
    # chdir is the easiest way to get short, relative paths in the
    # output.
    os.chdir(self._hgroot)
    covdir = os.path.join(self._installdir, '..', 'coverage')
    cov = coverage(data_file=os.path.join(covdir, 'cov'))

    # Map install directory paths back to source directory.
    cov.config.paths['srcdir'] = ['.', self._pythondir]

    cov.combine()

    excluded = [os.path.join(d, '*') for d in (self._bindir, self._testdir)]
    cov.report(ignore_errors=True, omit=excluded)

    if self.options.htmlcov:
        cov.html_report(directory=os.path.join(self._testdir, 'htmlcov'),
                        omit=excluded)
    if self.options.annotate:
        adir = os.path.join(self._testdir, 'annotated')
        if not os.path.isdir(adir):
            os.mkdir(adir)
        cov.annotate(directory=adir, omit=excluded)
2052 2052
def _findprogram(self, program):
    """Search PATH for an executable program."""
    for directory in os.environ.get('PATH', os.defpath).split(os.pathsep):
        candidate = os.path.join(directory, program)
        # On Windows, os.access(X_OK) is unreliable, so accept any hit.
        if os.name == 'nt' or os.access(candidate, os.X_OK):
            return candidate
    return None
2060 2060
def _checktools(self):
    """Ensure tools required to run tests are present."""
    for tool in self.REQUIREDTOOLS:
        # Windows binaries carry a .exe suffix.
        if os.name == 'nt' and not tool.endswith('.exe'):
            tool += '.exe'
        found = self._findprogram(tool)
        if found:
            vlog("# Found prerequisite", tool, "at", found)
        else:
            print("WARNING: Did not find prerequisite tool: %s " % tool)
2071 2071
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, put the standard streams in binary mode so test
        # output is not mangled by newline translation.
        import msvcrt
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,493 +1,507 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Smoke test
4 4 ============
5 5
6 6 $ $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE
7 7
8 8 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
9 9
 10 10 a successful test
11 11 =======================
12 12
13 13 $ cat > test-success.t << EOF
14 14 > $ echo babar
15 15 > babar
16 16 > $ echo xyzzy
17 17 > xyzzy
18 18 > EOF
19 19
20 20 $ $TESTDIR/run-tests.py --with-hg=`which hg`
21 21 .
22 22 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
23 23
24 24 failing test
25 25 ==================
26 26
27 27 $ cat > test-failure.t << EOF
28 28 > $ echo babar
29 29 > rataxes
30 30 > This is a noop statement so that
31 31 > this test is still more bytes than success.
32 32 > EOF
33 33
34 34 >>> fh = open('test-failure-unicode.t', 'wb')
35 35 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8'))
36 36 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8'))
37 37
38 38 $ $TESTDIR/run-tests.py --with-hg=`which hg`
39 39
40 40 --- $TESTTMP/test-failure.t
41 41 +++ $TESTTMP/test-failure.t.err
42 42 @@ -1,4 +1,4 @@
43 43 $ echo babar
44 44 - rataxes
45 45 + babar
46 46 This is a noop statement so that
47 47 this test is still more bytes than success.
48 48
49 49 ERROR: test-failure.t output changed
50 50 !.
51 51 --- $TESTTMP/test-failure-unicode.t
52 52 +++ $TESTTMP/test-failure-unicode.t.err
53 53 @@ -1,2 +1,2 @@
54 54 $ echo babar\xce\xb1 (esc)
55 55 - l\xce\xb5\xce\xb5t (esc)
56 56 + babar\xce\xb1 (esc)
57 57
58 58 ERROR: test-failure-unicode.t output changed
59 59 !
60 60 Failed test-failure.t: output changed
61 61 Failed test-failure-unicode.t: output changed
62 62 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
63 63 python hash seed: * (glob)
64 64 [1]
65 65
66 66 test --xunit support
67 67 $ $TESTDIR/run-tests.py --with-hg=`which hg` --xunit=xunit.xml
68 68
69 69 --- $TESTTMP/test-failure.t
70 70 +++ $TESTTMP/test-failure.t.err
71 71 @@ -1,4 +1,4 @@
72 72 $ echo babar
73 73 - rataxes
74 74 + babar
75 75 This is a noop statement so that
76 76 this test is still more bytes than success.
77 77
78 78 ERROR: test-failure.t output changed
79 79 !.
80 80 --- $TESTTMP/test-failure-unicode.t
81 81 +++ $TESTTMP/test-failure-unicode.t.err
82 82 @@ -1,2 +1,2 @@
83 83 $ echo babar\xce\xb1 (esc)
84 84 - l\xce\xb5\xce\xb5t (esc)
85 85 + babar\xce\xb1 (esc)
86 86
87 87 ERROR: test-failure-unicode.t output changed
88 88 !
89 89 Failed test-failure.t: output changed
90 90 Failed test-failure-unicode.t: output changed
91 91 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
92 92 python hash seed: * (glob)
93 93 [1]
94 94 $ cat xunit.xml
95 95 <?xml version="1.0" encoding="utf-8"?>
96 96 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
97 97 <testcase name="test-success.t" time="*"/> (glob)
98 98 <testcase name="test-failure-unicode.t" time="*"> (glob)
99 99 <![CDATA[--- $TESTTMP/test-failure-unicode.t
100 100 +++ $TESTTMP/test-failure-unicode.t.err
101 101 @@ -1,2 +1,2 @@
102 102 $ echo babar\xce\xb1 (esc)
103 103 - l\xce\xb5\xce\xb5t (esc)
104 104 + babar\xce\xb1 (esc)
105 105 ]]> </testcase>
106 106 <testcase name="test-failure.t" time="*"> (glob)
107 107 <![CDATA[--- $TESTTMP/test-failure.t
108 108 +++ $TESTTMP/test-failure.t.err
109 109 @@ -1,4 +1,4 @@
110 110 $ echo babar
111 111 - rataxes
112 112 + babar
113 113 This is a noop statement so that
114 114 this test is still more bytes than success.
115 115 ]]> </testcase>
116 116 </testsuite>
117 117
118 118 $ rm test-failure-unicode.t
119 119
120 120 test for --retest
121 121 ====================
122 122
123 123 $ $TESTDIR/run-tests.py --with-hg=`which hg` --retest
124 124
125 125 --- $TESTTMP/test-failure.t
126 126 +++ $TESTTMP/test-failure.t.err
127 127 @@ -1,4 +1,4 @@
128 128 $ echo babar
129 129 - rataxes
130 130 + babar
131 131 This is a noop statement so that
132 132 this test is still more bytes than success.
133 133
134 134 ERROR: test-failure.t output changed
135 135 !
136 136 Failed test-failure.t: output changed
137 137 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
138 138 python hash seed: * (glob)
139 139 [1]
140 140
141 141 Selecting Tests To Run
142 142 ======================
143 143
144 144 successful
145 145
146 146 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t
147 147 .
148 148 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
149 149
150 150 success w/ keyword
151 151 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k xyzzy
152 152 .
153 153 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
154 154
155 155 failed
156 156
157 157 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-failure.t
158 158
159 159 --- $TESTTMP/test-failure.t
160 160 +++ $TESTTMP/test-failure.t.err
161 161 @@ -1,4 +1,4 @@
162 162 $ echo babar
163 163 - rataxes
164 164 + babar
165 165 This is a noop statement so that
166 166 this test is still more bytes than success.
167 167
168 168 ERROR: test-failure.t output changed
169 169 !
170 170 Failed test-failure.t: output changed
171 171 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
172 172 python hash seed: * (glob)
173 173 [1]
174 174
175 175 failure w/ keyword
176 176 $ $TESTDIR/run-tests.py --with-hg=`which hg` -k rataxes
177 177
178 178 --- $TESTTMP/test-failure.t
179 179 +++ $TESTTMP/test-failure.t.err
180 180 @@ -1,4 +1,4 @@
181 181 $ echo babar
182 182 - rataxes
183 183 + babar
184 184 This is a noop statement so that
185 185 this test is still more bytes than success.
186 186
187 187 ERROR: test-failure.t output changed
188 188 !
189 189 Failed test-failure.t: output changed
190 190 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
191 191 python hash seed: * (glob)
192 192 [1]
193 193
194 194 Verify that when a process fails to start we show a useful message
195 195 ==================================================================
196 196 NOTE: there is currently a bug where this shows "2 failed" even though
197 197 it's actually the same test being reported for failure twice.
198 198
199 199 $ cat > test-serve-fail.t <<EOF
200 200 > $ echo 'abort: child process failed to start blah'
201 201 > EOF
202 202 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-serve-fail.t
203 203
204 204 ERROR: test-serve-fail.t output changed
205 205 !
206 206 ERROR: test-serve-fail.t output changed
207 207 !
208 208 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
209 209 Failed test-serve-fail.t: output changed
210 210 # Ran 1 tests, 0 skipped, 0 warned, 2 failed.
211 211 python hash seed: * (glob)
212 212 [1]
213 213 $ rm test-serve-fail.t
214 214
215 215 Running In Debug Mode
216 216 ======================
217 217
218 218 $ $TESTDIR/run-tests.py --with-hg=`which hg` --debug 2>&1 | grep -v pwd
219 219 + echo *SALT* 0 0 (glob)
220 220 *SALT* 0 0 (glob)
221 221 + echo babar
222 222 babar
223 223 + echo *SALT* 4 0 (glob)
224 224 *SALT* 4 0 (glob)
225 225 .+ echo *SALT* 0 0 (glob)
226 226 *SALT* 0 0 (glob)
227 227 + echo babar
228 228 babar
229 229 + echo *SALT* 2 0 (glob)
230 230 *SALT* 2 0 (glob)
231 231 + echo xyzzy
232 232 xyzzy
233 233 + echo *SALT* 4 0 (glob)
234 234 *SALT* 4 0 (glob)
235 235 .
236 236 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
237 237
238 238 Parallel runs
239 239 ==============
240 240
241 241 (duplicate the failing test to get predictable output)
242 242 $ cp test-failure.t test-failure-copy.t
243 243
244 244 $ $TESTDIR/run-tests.py --with-hg=`which hg` --jobs 2 test-failure*.t -n
245 245 !!
246 246 Failed test-failure*.t: output changed (glob)
247 247 Failed test-failure*.t: output changed (glob)
248 248 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
249 249 python hash seed: * (glob)
250 250 [1]
251 251
252 252 failures in parallel with --first should only print one failure
253 253 >>> f = open('test-nothing.t', 'w')
254 254 >>> f.write('foo\n' * 1024)
255 255 >>> f.write(' $ sleep 1')
256 256 $ $TESTDIR/run-tests.py --with-hg=`which hg` --jobs 2 --first
257 257
258 258 --- $TESTTMP/test-failure*.t (glob)
259 259 +++ $TESTTMP/test-failure*.t.err (glob)
260 260 @@ -1,4 +1,4 @@
261 261 $ echo babar
262 262 - rataxes
263 263 + babar
264 264 This is a noop statement so that
265 265 this test is still more bytes than success.
266 266
267 267 Failed test-failure*.t: output changed (glob)
268 268 Failed test-nothing.t: output changed
269 269 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
270 270 python hash seed: * (glob)
271 271 [1]
272 272
273 273
274 274 (delete the duplicated test file)
275 275 $ rm test-failure-copy.t test-nothing.t
276 276
277 277
278 278 Interactive run
279 279 ===============
280 280
281 281 (backup the failing test)
282 282 $ cp test-failure.t backup
283 283
284 284 Refuse the fix
285 285
286 286 $ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i
287 287
288 288 --- $TESTTMP/test-failure.t
289 289 +++ $TESTTMP/test-failure.t.err
290 290 @@ -1,4 +1,4 @@
291 291 $ echo babar
292 292 - rataxes
293 293 + babar
294 294 This is a noop statement so that
295 295 this test is still more bytes than success.
296 296 Accept this change? [n]
297 297 ERROR: test-failure.t output changed
298 298 !.
299 299 Failed test-failure.t: output changed
300 300 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
301 301 python hash seed: * (glob)
302 302 [1]
303 303
304 304 $ cat test-failure.t
305 305 $ echo babar
306 306 rataxes
307 307 This is a noop statement so that
308 308 this test is still more bytes than success.
309 309
310 310 Interactive with custom view
311 311
312 312 $ echo 'n' | $TESTDIR/run-tests.py --with-hg=`which hg` -i --view echo
313 313 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
314 314 Accept this change? [n]* (glob)
315 315 ERROR: test-failure.t output changed
316 316 !.
317 317 Failed test-failure.t: output changed
318 318 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
319 319 python hash seed: * (glob)
320 320 [1]
321 321
322 322 View the fix
323 323
324 324 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` --view echo
325 325 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
326 326
327 327 ERROR: test-failure.t output changed
328 328 !.
329 329 Failed test-failure.t: output changed
330 330 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
331 331 python hash seed: * (glob)
332 332 [1]
333 333
334 334 Accept the fix
335 335
336 336 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
337 337 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
338 338 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
339 339 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
340 340 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
341 341 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
342 342 $ echo 'y' | $TESTDIR/run-tests.py --with-hg=`which hg` -i 2>&1 | \
343 343 > sed -e 's,(glob)$,&<,g'
344 344
345 345 --- $TESTTMP/test-failure.t
346 346 +++ $TESTTMP/test-failure.t.err
347 347 @@ -1,9 +1,9 @@
348 348 $ echo babar
349 349 - rataxes
350 350 + babar
351 351 This is a noop statement so that
352 352 this test is still more bytes than success.
353 353 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
354 354 - saved backup bundle to $TESTTMP/foo.hg
355 355 + saved backup bundle to $TESTTMP/foo.hg (glob)<
356 356 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
357 357 saved backup bundle to $TESTTMP/foo.hg (glob)<
358 358 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
359 359 Accept this change? [n] ..
360 360 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
361 361
362 362 $ sed -e 's,(glob)$,&<,g' test-failure.t
363 363 $ echo babar
364 364 babar
365 365 This is a noop statement so that
366 366 this test is still more bytes than success.
367 367 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
368 368 saved backup bundle to $TESTTMP/foo.hg (glob)<
369 369 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
370 370 saved backup bundle to $TESTTMP/foo.hg (glob)<
371 371 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
372 372 saved backup bundle to $TESTTMP/*.hg (glob)<
373 373
374 374 (reinstall)
375 375 $ mv backup test-failure.t
376 376
377 377 No Diff
378 378 ===============
379 379
380 380 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
381 381 !.
382 382 Failed test-failure.t: output changed
383 383 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
384 384 python hash seed: * (glob)
385 385 [1]
386 386
387 387 test for --time
388 388 ==================
389 389
390 390 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time
391 391 .
392 392 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
393 393 # Producing time report
394 394 cuser csys real Test
395 395 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
396 396
397 397 test for --time with --job enabled
398 398 ====================================
399 399
400 400 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-success.t --time --jobs 2
401 401 .
402 402 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
403 403 # Producing time report
404 404 cuser csys real Test
405 405 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
406 406
407 407 Skips
408 408 ================
409 409 $ cat > test-skip.t <<EOF
410 410 > $ echo xyzzy
411 411 > #require false
412 412 > EOF
413 413 $ $TESTDIR/run-tests.py --with-hg=`which hg` --nodiff
414 414 !.s
415 415 Skipped test-skip.t: skipped
416 416 Failed test-failure.t: output changed
417 417 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
418 418 python hash seed: * (glob)
419 419 [1]
420 420
421 421 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy
422 422 .s
423 423 Skipped test-skip.t: skipped
424 424 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
425 425
426 426 Skips with xml
427 427 $ $TESTDIR/run-tests.py --with-hg=`which hg` --keyword xyzzy \
428 428 > --xunit=xunit.xml
429 429 .s
430 430 Skipped test-skip.t: skipped
431 431 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
432 432 $ cat xunit.xml
433 433 <?xml version="1.0" encoding="utf-8"?>
434 434 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
435 435 <testcase name="test-success.t" time="*"/> (glob)
436 436 </testsuite>
437 437
438 438 Missing skips or blacklisted skips don't count as executed:
439 439 $ echo test-failure.t > blacklist
440 440 $ $TESTDIR/run-tests.py --with-hg=`which hg` --blacklist=blacklist \
441 441 > test-failure.t test-bogus.t
442 442 ss
443 443 Skipped test-bogus.t: Doesn't exist
444 444 Skipped test-failure.t: blacklisted
445 445 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
446 446
447 447 #if json
448 448
449 449 test for --json
450 450 ==================
451 451
452 452 $ $TESTDIR/run-tests.py --with-hg=`which hg` --json
453 453
454 454 --- $TESTTMP/test-failure.t
455 455 +++ $TESTTMP/test-failure.t.err
456 456 @@ -1,4 +1,4 @@
457 457 $ echo babar
458 458 - rataxes
459 459 + babar
460 460 This is a noop statement so that
461 461 this test is still more bytes than success.
462 462
463 463 ERROR: test-failure.t output changed
464 464 !.s
465 465 Skipped test-skip.t: skipped
466 466 Failed test-failure.t: output changed
467 467 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
468 468 python hash seed: * (glob)
469 469 [1]
470 470
471 471 $ cat report.json
472 472 testreport ={
473 473 "test-failure.t": [\{] (re)
474 474 "csys": "\s*[\d\.]{4,5}", ? (re)
475 475 "cuser": "\s*[\d\.]{4,5}", ? (re)
476 476 "result": "failure", ? (re)
477 477 "time": "\s*[\d\.]{4,5}" (re)
478 478 }, ? (re)
479 479 "test-skip.t": {
480 480 "csys": "\s*[\d\.]{4,5}", ? (re)
481 481 "cuser": "\s*[\d\.]{4,5}", ? (re)
482 482 "result": "skip", ? (re)
483 483 "time": "\s*[\d\.]{4,5}" (re)
484 484 }, ? (re)
485 485 "test-success.t": [\{] (re)
486 486 "csys": "\s*[\d\.]{4,5}", ? (re)
487 487 "cuser": "\s*[\d\.]{4,5}", ? (re)
488 488 "result": "success", ? (re)
489 489 "time": "\s*[\d\.]{4,5}" (re)
490 490 }
491 491 } (no-eol)
492 492
493 493 #endif
494
495 backslash on end of line with glob matching is handled properly
496
497 $ cat > test-glob-backslash.t << EOF
498 > $ echo 'foo bar \\'
499 > foo * \ (glob)
500 > EOF
501
502 $ $TESTDIR/run-tests.py --with-hg=`which hg` test-glob-backslash.t
503 .
504 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
505
506 $ rm -f test-glob-backslash.t
507
General Comments 0
You need to be logged in to leave comments. Login now