run-tests: fix crash when --json and --blacklist are both used (issue5050)...
Laurent Charignon
r27927:0de4dfc9 stable
@@ -1,2375 +1,2381 @@
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import print_function
47 47
48 48 from distutils import version
49 49 import difflib
50 50 import errno
51 51 import optparse
52 52 import os
53 53 import shutil
54 54 import subprocess
55 55 import signal
56 56 import socket
57 57 import sys
58 58 import tempfile
59 59 import time
60 60 import random
61 61 import re
62 62 import threading
63 63 import killdaemons as killmod
64 64 try:
65 65 import Queue as queue
66 66 except ImportError:
67 67 import queue
68 68 from xml.dom import minidom
69 69 import unittest
70 70
71 71 osenvironb = getattr(os, 'environb', os.environ)
72 72
73 73 try:
74 74 import json
75 75 except ImportError:
76 76 try:
77 77 import simplejson as json
78 78 except ImportError:
79 79 json = None
80 80
81 81 processlock = threading.Lock()
82 82
83 83 if sys.version_info > (3, 5, 0):
84 84 PYTHON3 = True
85 85 xrange = range # we use xrange in one place, and we'd rather not use range
86 86 def _bytespath(p):
87 87 return p.encode('utf-8')
88 88
89 89 def _strpath(p):
90 90 return p.decode('utf-8')
91 91
92 92 elif sys.version_info >= (3, 0, 0):
93 93 print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
94 94 (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
95 95 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
96 96 else:
97 97 PYTHON3 = False
98 98
99 99 # In python 2.x, path operations are generally done using
100 100 # bytestrings by default, so we don't have to do any extra
101 101 # fiddling there. We define the wrapper functions anyway just to
102 102 # help keep code consistent between platforms.
103 103 def _bytespath(p):
104 104 return p
105 105
106 106 _strpath = _bytespath
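# In other words, on Python 3 these helpers convert between str and bytes
# paths (for example _bytespath('foo') == b'foo' and _strpath(b'foo') ==
# 'foo'), while on Python 2 both are effectively identity functions.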
107 107
108 108 # For Windows support
109 109 wifexited = getattr(os, "WIFEXITED", lambda x: False)
110 110
111 111 def checkportisavailable(port):
112 112 """return true if a port seems free to bind on localhost"""
113 113 try:
114 114 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
115 115 s.bind(('localhost', port))
116 116 s.close()
117 117 return True
118 118 except socket.error as exc:
119 119 if not exc.errno == errno.EADDRINUSE:
120 120 raise
121 121 return False
122 122
123 123 closefds = os.name == 'posix'
124 124 def Popen4(cmd, wd, timeout, env=None):
125 125 processlock.acquire()
126 126 p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
127 127 close_fds=closefds,
128 128 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
129 129 stderr=subprocess.STDOUT)
130 130 processlock.release()
131 131
132 132 p.fromchild = p.stdout
133 133 p.tochild = p.stdin
134 134 p.childerr = p.stderr
135 135
136 136 p.timeout = False
137 137 if timeout:
138 138 def t():
139 139 start = time.time()
140 140 while time.time() - start < timeout and p.returncode is None:
141 141 time.sleep(.1)
142 142 p.timeout = True
143 143 if p.returncode is None:
144 144 terminate(p)
145 145 threading.Thread(target=t).start()
146 146
147 147 return p
148 148
149 149 PYTHON = _bytespath(sys.executable.replace('\\', '/'))
150 150 IMPL_PATH = b'PYTHONPATH'
151 151 if 'java' in sys.platform:
152 152 IMPL_PATH = b'JYTHONPATH'
153 153
154 154 defaults = {
155 155 'jobs': ('HGTEST_JOBS', 1),
156 156 'timeout': ('HGTEST_TIMEOUT', 180),
157 157 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
158 158 'port': ('HGTEST_PORT', 20059),
159 159 'shell': ('HGTEST_SHELL', 'sh'),
160 160 }
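# Each default above can be overridden through its environment variable when
# run-tests.py is invoked; for example (values and test name are purely
# illustrative):
#
#   HGTEST_JOBS=4 HGTEST_TIMEOUT=360 ./run-tests.py test-foo.t
#
# getparser() below converts the environment value to the type of the
# built-in default.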
161 161
162 162 def parselistfiles(files, listtype, warn=True):
163 163 entries = dict()
164 164 for filename in files:
165 165 try:
166 166 path = os.path.expanduser(os.path.expandvars(filename))
167 167 f = open(path, "rb")
168 168 except IOError as err:
169 169 if err.errno != errno.ENOENT:
170 170 raise
171 171 if warn:
172 172 print("warning: no such %s file: %s" % (listtype, filename))
173 173 continue
174 174
175 175 for line in f.readlines():
176 176 line = line.split(b'#', 1)[0].strip()
177 177 if line:
178 178 entries[line] = filename
179 179
180 180 f.close()
181 181 return entries
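# A blacklist or whitelist file is plain text with one test name per line;
# anything after a '#' is treated as a comment and blank lines are ignored.
# A hypothetical file passed via --blacklist might contain:
#
#   # tests that are too slow on this machine
#   test-foo.t
#   test-bar.t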
182 182
183 183 def getparser():
184 184 """Obtain the OptionParser used by the CLI."""
185 185 parser = optparse.OptionParser("%prog [options] [tests]")
186 186
187 187 # keep these sorted
188 188 parser.add_option("--blacklist", action="append",
189 189 help="skip tests listed in the specified blacklist file")
190 190 parser.add_option("--whitelist", action="append",
191 191 help="always run tests listed in the specified whitelist file")
192 192 parser.add_option("--changed", type="string",
193 193 help="run tests that are changed in parent rev or working directory")
194 194 parser.add_option("-C", "--annotate", action="store_true",
195 195 help="output files annotated with coverage")
196 196 parser.add_option("-c", "--cover", action="store_true",
197 197 help="print a test coverage report")
198 198 parser.add_option("-d", "--debug", action="store_true",
199 199 help="debug mode: write output of test scripts to console"
200 200 " rather than capturing and diffing it (disables timeout)")
201 201 parser.add_option("-f", "--first", action="store_true",
202 202 help="exit on the first test failure")
203 203 parser.add_option("-H", "--htmlcov", action="store_true",
204 204 help="create an HTML report of the coverage of the files")
205 205 parser.add_option("-i", "--interactive", action="store_true",
206 206 help="prompt to accept changed output")
207 207 parser.add_option("-j", "--jobs", type="int",
208 208 help="number of jobs to run in parallel"
209 209 " (default: $%s or %d)" % defaults['jobs'])
210 210 parser.add_option("--keep-tmpdir", action="store_true",
211 211 help="keep temporary directory after running tests")
212 212 parser.add_option("-k", "--keywords",
213 213 help="run tests matching keywords")
214 214 parser.add_option("-l", "--local", action="store_true",
215 215 help="shortcut for --with-hg=<testdir>/../hg")
216 216 parser.add_option("--loop", action="store_true",
217 217 help="loop tests repeatedly")
218 218 parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
219 219 help="run each test N times (default=1)", default=1)
220 220 parser.add_option("-n", "--nodiff", action="store_true",
221 221 help="skip showing test changes")
222 222 parser.add_option("-p", "--port", type="int",
223 223 help="port on which servers should listen"
224 224 " (default: $%s or %d)" % defaults['port'])
225 225 parser.add_option("--compiler", type="string",
226 226 help="compiler to build with")
227 227 parser.add_option("--pure", action="store_true",
228 228 help="use pure Python code instead of C extensions")
229 229 parser.add_option("-R", "--restart", action="store_true",
230 230 help="restart at last error")
231 231 parser.add_option("-r", "--retest", action="store_true",
232 232 help="retest failed tests")
233 233 parser.add_option("-S", "--noskips", action="store_true",
234 234 help="don't report skip tests verbosely")
235 235 parser.add_option("--shell", type="string",
236 236 help="shell to use (default: $%s or %s)" % defaults['shell'])
237 237 parser.add_option("-t", "--timeout", type="int",
238 238 help="kill errant tests after TIMEOUT seconds"
239 239 " (default: $%s or %d)" % defaults['timeout'])
240 240 parser.add_option("--slowtimeout", type="int",
241 241 help="kill errant slow tests after SLOWTIMEOUT seconds"
242 242 " (default: $%s or %d)" % defaults['slowtimeout'])
243 243 parser.add_option("--time", action="store_true",
244 244 help="time how long each test takes")
245 245 parser.add_option("--json", action="store_true",
246 246 help="store test result data in 'report.json' file")
247 247 parser.add_option("--tmpdir", type="string",
248 248 help="run tests in the given temporary directory"
249 249 " (implies --keep-tmpdir)")
250 250 parser.add_option("-v", "--verbose", action="store_true",
251 251 help="output verbose messages")
252 252 parser.add_option("--xunit", type="string",
253 253 help="record xunit results at specified path")
254 254 parser.add_option("--view", type="string",
255 255 help="external diff viewer")
256 256 parser.add_option("--with-hg", type="string",
257 257 metavar="HG",
258 258 help="test using specified hg script rather than a "
259 259 "temporary installation")
260 260 parser.add_option("-3", "--py3k-warnings", action="store_true",
261 261 help="enable Py3k warnings on Python 2.6+")
262 262 parser.add_option('--extra-config-opt', action="append",
263 263 help='set the given config opt in the test hgrc')
264 264 parser.add_option('--random', action="store_true",
265 265 help='run tests in random order')
266 266 parser.add_option('--profile-runner', action='store_true',
267 267 help='run statprof on run-tests')
268 268 parser.add_option('--allow-slow-tests', action='store_true',
269 269 help='allow extremely slow tests')
270 270 parser.add_option('--showchannels', action='store_true',
271 271 help='show scheduling channels')
272 272
273 273 for option, (envvar, default) in defaults.items():
274 274 defaults[option] = type(default)(os.environ.get(envvar, default))
275 275 parser.set_defaults(**defaults)
276 276
277 277 return parser
278 278
279 279 def parseargs(args, parser):
280 280 """Parse arguments with our OptionParser and validate results."""
281 281 (options, args) = parser.parse_args(args)
282 282
283 283 # jython is always pure
284 284 if 'java' in sys.platform or '__pypy__' in sys.modules:
285 285 options.pure = True
286 286
287 287 if options.with_hg:
288 288 options.with_hg = os.path.expanduser(options.with_hg)
289 289 if not (os.path.isfile(options.with_hg) and
290 290 os.access(options.with_hg, os.X_OK)):
291 291 parser.error('--with-hg must specify an executable hg script')
292 292 if not os.path.basename(options.with_hg) == 'hg':
293 293 sys.stderr.write('warning: --with-hg should specify an hg script\n')
294 294 if options.local:
295 295 testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
296 296 hgbin = os.path.join(os.path.dirname(testdir), b'hg')
297 297 if os.name != 'nt' and not os.access(hgbin, os.X_OK):
298 298 parser.error('--local specified, but %r not found or not executable'
299 299 % hgbin)
300 300 options.with_hg = hgbin
301 301
302 302 options.anycoverage = options.cover or options.annotate or options.htmlcov
303 303 if options.anycoverage:
304 304 try:
305 305 import coverage
306 306 covver = version.StrictVersion(coverage.__version__).version
307 307 if covver < (3, 3):
308 308 parser.error('coverage options require coverage 3.3 or later')
309 309 except ImportError:
310 310 parser.error('coverage options now require the coverage package')
311 311
312 312 if options.anycoverage and options.local:
313 313 # this needs some path mangling somewhere, I guess
314 314 parser.error("sorry, coverage options do not work when --local "
315 315 "is specified")
316 316
317 317 if options.anycoverage and options.with_hg:
318 318 parser.error("sorry, coverage options do not work when --with-hg "
319 319 "is specified")
320 320
321 321 global verbose
322 322 if options.verbose:
323 323 verbose = ''
324 324
325 325 if options.tmpdir:
326 326 options.tmpdir = os.path.expanduser(options.tmpdir)
327 327
328 328 if options.jobs < 1:
329 329 parser.error('--jobs must be positive')
330 330 if options.interactive and options.debug:
331 331 parser.error("-i/--interactive and -d/--debug are incompatible")
332 332 if options.debug:
333 333 if options.timeout != defaults['timeout']:
334 334 sys.stderr.write(
335 335 'warning: --timeout option ignored with --debug\n')
336 336 if options.slowtimeout != defaults['slowtimeout']:
337 337 sys.stderr.write(
338 338 'warning: --slowtimeout option ignored with --debug\n')
339 339 options.timeout = 0
340 340 options.slowtimeout = 0
341 341 if options.py3k_warnings:
342 342 if PYTHON3:
343 343 parser.error(
344 344 '--py3k-warnings can only be used on Python 2.6 and 2.7')
345 345 if options.blacklist:
346 346 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
347 347 if options.whitelist:
348 348 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
349 349 else:
350 350 options.whitelisted = {}
351 351
352 352 if options.showchannels:
353 353 options.nodiff = True
354 354
355 355 return (options, args)
356 356
357 357 def rename(src, dst):
358 358 """Like os.rename(), trade atomicity and opened files friendliness
359 359 for existing destination support.
360 360 """
361 361 shutil.copy(src, dst)
362 362 os.remove(src)
363 363
364 364 _unified_diff = difflib.unified_diff
365 365 if PYTHON3:
366 366 import functools
367 367 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
368 368
369 369 def getdiff(expected, output, ref, err):
370 370 servefail = False
371 371 lines = []
372 372 for line in _unified_diff(expected, output, ref, err):
373 373 if line.startswith(b'+++') or line.startswith(b'---'):
374 374 line = line.replace(b'\\', b'/')
375 375 if line.endswith(b' \n'):
376 376 line = line[:-2] + b'\n'
377 377 lines.append(line)
378 378 if not servefail and line.startswith(
379 379 b'+ abort: child process failed to start'):
380 380 servefail = True
381 381
382 382 return servefail, lines
383 383
384 384 verbose = False
385 385 def vlog(*msg):
386 386 """Log only when in verbose mode."""
387 387 if verbose is False:
388 388 return
389 389
390 390 return log(*msg)
391 391
392 392 # Bytes that break XML even in a CDATA block: control characters 0-31
393 393 # sans \t, \n and \r
394 394 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
395 395
396 396 def cdatasafe(data):
397 397 """Make a string safe to include in a CDATA block.
398 398
399 399 Certain control characters are illegal in a CDATA block, and
400 400 there's no way to include a ]]> in a CDATA either. This function
401 401 replaces illegal bytes with ? and adds a space between the ]] so
402 402 that it won't break the CDATA block.
403 403 """
404 404 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
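# For example, cdatasafe(b'a\x07b]]>c') returns b'a?b] ]>c': the control
# character is replaced with '?' and the ']]>' sequence is broken up so it
# cannot terminate the surrounding CDATA block.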
405 405
406 406 def log(*msg):
407 407 """Log something to stdout.
408 408
409 409 Arguments are strings to print.
410 410 """
411 411 with iolock:
412 412 if verbose:
413 413 print(verbose, end=' ')
414 414 for m in msg:
415 415 print(m, end=' ')
416 416 print()
417 417 sys.stdout.flush()
418 418
419 419 def terminate(proc):
420 420 """Terminate subprocess (with fallback for Python versions < 2.6)"""
421 421 vlog('# Terminating process %d' % proc.pid)
422 422 try:
423 423 getattr(proc, 'terminate', lambda : os.kill(proc.pid, signal.SIGTERM))()
424 424 except OSError:
425 425 pass
426 426
427 427 def killdaemons(pidfile):
428 428 return killmod.killdaemons(pidfile, tryhard=False, remove=True,
429 429 logfn=vlog)
430 430
431 431 class Test(unittest.TestCase):
432 432 """Encapsulates a single, runnable test.
433 433
434 434 While this class conforms to the unittest.TestCase API, it differs in that
435 435 instances need to be instantiated manually. (Typically, unittest.TestCase
436 436 classes are instantiated automatically by scanning modules.)
437 437 """
438 438
439 439 # Status code reserved for skipped tests (used by hghave).
440 440 SKIPPED_STATUS = 80
441 441
442 442 def __init__(self, path, tmpdir, keeptmpdir=False,
443 443 debug=False,
444 444 timeout=defaults['timeout'],
445 445 startport=defaults['port'], extraconfigopts=None,
446 446 py3kwarnings=False, shell=None,
447 447 slowtimeout=defaults['slowtimeout']):
448 448 """Create a test from parameters.
449 449
450 450 path is the full path to the file defining the test.
451 451
452 452 tmpdir is the main temporary directory to use for this test.
453 453
454 454 keeptmpdir determines whether to keep the test's temporary directory
455 455 after execution. It defaults to removal (False).
456 456
457 457 debug mode will make the test execute verbosely, with unfiltered
458 458 output.
459 459
460 460 timeout controls the maximum run time of the test. It is ignored when
461 461 debug is True. See slowtimeout for tests with #require slow.
462 462
463 463 slowtimeout overrides timeout if the test has #require slow.
464 464
465 465 startport controls the starting port number to use for this test. Each
466 466 test will reserve 3 port numbers for execution. It is the caller's
467 467 responsibility to allocate a non-overlapping port range to Test
468 468 instances.
469 469
470 470 extraconfigopts is an iterable of extra hgrc config options. Values
471 471 must have the form "key=value" (something understood by hgrc). Values
472 472 of the form "foo.key=value" will result in "[foo] key=value".
473 473
474 474 py3kwarnings enables Py3k warnings.
475 475
476 476 shell is the shell to execute tests in.
477 477 """
478 478 self.path = path
479 479 self.bname = os.path.basename(path)
480 480 self.name = _strpath(self.bname)
481 481 self._testdir = os.path.dirname(path)
482 482 self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)
483 483
484 484 self._threadtmp = tmpdir
485 485 self._keeptmpdir = keeptmpdir
486 486 self._debug = debug
487 487 self._timeout = timeout
488 488 self._slowtimeout = slowtimeout
489 489 self._startport = startport
490 490 self._extraconfigopts = extraconfigopts or []
491 491 self._py3kwarnings = py3kwarnings
492 492 self._shell = _bytespath(shell)
493 493
494 494 self._aborted = False
495 495 self._daemonpids = []
496 496 self._finished = None
497 497 self._ret = None
498 498 self._out = None
499 499 self._skipped = None
500 500 self._testtmp = None
501 501
502 502 # If we're not in --debug mode and reference output file exists,
503 503 # check test output against it.
504 504 if debug:
505 505 self._refout = None # to match "out is None"
506 506 elif os.path.exists(self.refpath):
507 507 f = open(self.refpath, 'rb')
508 508 self._refout = f.read().splitlines(True)
509 509 f.close()
510 510 else:
511 511 self._refout = []
512 512
513 513 # needed to get base class __repr__ running
514 514 @property
515 515 def _testMethodName(self):
516 516 return self.name
517 517
518 518 def __str__(self):
519 519 return self.name
520 520
521 521 def shortDescription(self):
522 522 return self.name
523 523
524 524 def setUp(self):
525 525 """Tasks to perform before run()."""
526 526 self._finished = False
527 527 self._ret = None
528 528 self._out = None
529 529 self._skipped = None
530 530
531 531 try:
532 532 os.mkdir(self._threadtmp)
533 533 except OSError as e:
534 534 if e.errno != errno.EEXIST:
535 535 raise
536 536
537 537 self._testtmp = os.path.join(self._threadtmp,
538 538 os.path.basename(self.path))
539 539 os.mkdir(self._testtmp)
540 540
541 541 # Remove any previous output files.
542 542 if os.path.exists(self.errpath):
543 543 try:
544 544 os.remove(self.errpath)
545 545 except OSError as e:
546 546 # We might have raced another test to clean up a .err
547 547 # file, so ignore ENOENT when removing a previous .err
548 548 # file.
549 549 if e.errno != errno.ENOENT:
550 550 raise
551 551
552 552 def run(self, result):
553 553 """Run this test and report results against a TestResult instance."""
554 554 # This function is extremely similar to unittest.TestCase.run(). Once
555 555 # we require Python 2.7 (or at least its version of unittest), this
556 556 # function can largely go away.
557 557 self._result = result
558 558 result.startTest(self)
559 559 try:
560 560 try:
561 561 self.setUp()
562 562 except (KeyboardInterrupt, SystemExit):
563 563 self._aborted = True
564 564 raise
565 565 except Exception:
566 566 result.addError(self, sys.exc_info())
567 567 return
568 568
569 569 success = False
570 570 try:
571 571 self.runTest()
572 572 except KeyboardInterrupt:
573 573 self._aborted = True
574 574 raise
575 575 except SkipTest as e:
576 576 result.addSkip(self, str(e))
577 577 # The base class will have already counted this as a
578 578 # test we "ran", but we want to exclude skipped tests
579 579 # from those we count towards those run.
580 580 result.testsRun -= 1
581 581 except IgnoreTest as e:
582 582 result.addIgnore(self, str(e))
583 583 # As with skips, ignores also should be excluded from
584 584 # the number of tests executed.
585 585 result.testsRun -= 1
586 586 except WarnTest as e:
587 587 result.addWarn(self, str(e))
588 588 except ReportedTest as e:
589 589 pass
590 590 except self.failureException as e:
591 591 # This differs from unittest in that we don't capture
592 592 # the stack trace. This is for historical reasons and
593 593 # this decision could be revisited in the future,
594 594 # especially for PythonTest instances.
595 595 if result.addFailure(self, str(e)):
596 596 success = True
597 597 except Exception:
598 598 result.addError(self, sys.exc_info())
599 599 else:
600 600 success = True
601 601
602 602 try:
603 603 self.tearDown()
604 604 except (KeyboardInterrupt, SystemExit):
605 605 self._aborted = True
606 606 raise
607 607 except Exception:
608 608 result.addError(self, sys.exc_info())
609 609 success = False
610 610
611 611 if success:
612 612 result.addSuccess(self)
613 613 finally:
614 614 result.stopTest(self, interrupted=self._aborted)
615 615
616 616 def runTest(self):
617 617 """Run this test instance.
618 618
619 619 This will return a tuple describing the result of the test.
620 620 """
621 621 env = self._getenv()
622 622 self._daemonpids.append(env['DAEMON_PIDS'])
623 623 self._createhgrc(env['HGRCPATH'])
624 624
625 625 vlog('# Test', self.name)
626 626
627 627 ret, out = self._run(env)
628 628 self._finished = True
629 629 self._ret = ret
630 630 self._out = out
631 631
632 632 def describe(ret):
633 633 if ret < 0:
634 634 return 'killed by signal: %d' % -ret
635 635 return 'returned error code %d' % ret
636 636
637 637 self._skipped = False
638 638
639 639 if ret == self.SKIPPED_STATUS:
640 640 if out is None: # Debug mode, nothing to parse.
641 641 missing = ['unknown']
642 642 failed = None
643 643 else:
644 644 missing, failed = TTest.parsehghaveoutput(out)
645 645
646 646 if not missing:
647 647 missing = ['skipped']
648 648
649 649 if failed:
650 650 self.fail('hg have failed checking for %s' % failed[-1])
651 651 else:
652 652 self._skipped = True
653 653 raise SkipTest(missing[-1])
654 654 elif ret == 'timeout':
655 655 self.fail('timed out')
656 656 elif ret is False:
657 657 raise WarnTest('no result code from test')
658 658 elif out != self._refout:
659 659 # Diff generation may rely on written .err file.
660 660 if (ret != 0 or out != self._refout) and not self._skipped \
661 661 and not self._debug:
662 662 f = open(self.errpath, 'wb')
663 663 for line in out:
664 664 f.write(line)
665 665 f.close()
666 666
667 667 # The result object handles diff calculation for us.
668 668 if self._result.addOutputMismatch(self, ret, out, self._refout):
669 669 # change was accepted, skip failing
670 670 return
671 671
672 672 if ret:
673 673 msg = 'output changed and ' + describe(ret)
674 674 else:
675 675 msg = 'output changed'
676 676
677 677 self.fail(msg)
678 678 elif ret:
679 679 self.fail(describe(ret))
680 680
681 681 def tearDown(self):
682 682 """Tasks to perform after run()."""
683 683 for entry in self._daemonpids:
684 684 killdaemons(entry)
685 685 self._daemonpids = []
686 686
687 687 if self._keeptmpdir:
688 688 log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
689 689 (self._testtmp, self._threadtmp))
690 690 else:
691 691 shutil.rmtree(self._testtmp, True)
692 692 shutil.rmtree(self._threadtmp, True)
693 693
694 694 if (self._ret != 0 or self._out != self._refout) and not self._skipped \
695 695 and not self._debug and self._out:
696 696 f = open(self.errpath, 'wb')
697 697 for line in self._out:
698 698 f.write(line)
699 699 f.close()
700 700
701 701 vlog("# Ret was:", self._ret, '(%s)' % self.name)
702 702
703 703 def _run(self, env):
704 704 # This should be implemented in child classes to run tests.
705 705 raise SkipTest('unknown test type')
706 706
707 707 def abort(self):
708 708 """Terminate execution of this test."""
709 709 self._aborted = True
710 710
711 711 def _getreplacements(self):
712 712 """Obtain a mapping of text replacements to apply to test output.
713 713
714 714 Test output needs to be normalized so it can be compared to expected
715 715 output. This function defines how some of that normalization will
716 716 occur.
717 717 """
718 718 r = [
719 719 (br':%d\b' % self._startport, b':$HGPORT'),
720 720 (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
721 721 (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
722 722 (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
723 723 br'\1 (glob)'),
724 724 ]
725 725
726 726 if os.name == 'nt':
727 727 r.append(
728 728 (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
729 729 c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
730 730 for c in self._testtmp), b'$TESTTMP'))
731 731 else:
732 732 r.append((re.escape(self._testtmp), b'$TESTTMP'))
733 733
734 734 return r
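# As an illustration, with a start port of 20059 (the default base port) the
# replacements above rewrite ':20059', ':20060' and ':20061' in test output
# to ':$HGPORT', ':$HGPORT1' and ':$HGPORT2', and the test's temporary
# directory path is rewritten to '$TESTTMP', so recorded output stays stable
# across runs and machines.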
735 735
736 736 def _getenv(self):
737 737 """Obtain environment variables to use during test execution."""
738 738 env = os.environ.copy()
739 739 env['TESTTMP'] = self._testtmp
740 740 env['HOME'] = self._testtmp
741 741 env["HGPORT"] = str(self._startport)
742 742 env["HGPORT1"] = str(self._startport + 1)
743 743 env["HGPORT2"] = str(self._startport + 2)
744 744 env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
745 745 env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
746 746 env["HGEDITOR"] = ('"' + sys.executable + '"'
747 747 + ' -c "import sys; sys.exit(0)"')
748 748 env["HGMERGE"] = "internal:merge"
749 749 env["HGUSER"] = "test"
750 750 env["HGENCODING"] = "ascii"
751 751 env["HGENCODINGMODE"] = "strict"
752 752
753 753 # Reset some environment variables to well-known values so that
754 754 # the tests produce repeatable output.
755 755 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
756 756 env['TZ'] = 'GMT'
757 757 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
758 758 env['COLUMNS'] = '80'
759 759 env['TERM'] = 'xterm'
760 760
761 761 for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
762 762 'NO_PROXY').split():
763 763 if k in env:
764 764 del env[k]
765 765
766 766 # unset env related to hooks
767 767 for k in env.keys():
768 768 if k.startswith('HG_'):
769 769 del env[k]
770 770
771 771 return env
772 772
773 773 def _createhgrc(self, path):
774 774 """Create an hgrc file for this test."""
775 775 hgrc = open(path, 'wb')
776 776 hgrc.write(b'[ui]\n')
777 777 hgrc.write(b'slash = True\n')
778 778 hgrc.write(b'interactive = False\n')
779 779 hgrc.write(b'mergemarkers = detailed\n')
780 780 hgrc.write(b'promptecho = True\n')
781 781 hgrc.write(b'[defaults]\n')
782 782 hgrc.write(b'backout = -d "0 0"\n')
783 783 hgrc.write(b'commit = -d "0 0"\n')
784 784 hgrc.write(b'shelve = --date "0 0"\n')
785 785 hgrc.write(b'tag = -d "0 0"\n')
786 786 hgrc.write(b'[devel]\n')
787 787 hgrc.write(b'all-warnings = true\n')
788 788 hgrc.write(b'[largefiles]\n')
789 789 hgrc.write(b'usercache = %s\n' %
790 790 (os.path.join(self._testtmp, b'.cache/largefiles')))
791 791
792 792 for opt in self._extraconfigopts:
793 793 section, key = opt.split('.', 1)
794 794 assert '=' in key, ('extra config opt %s must '
795 795 'have an = for assignment' % opt)
796 796 hgrc.write(b'[%s]\n%s\n' % (section, key))
797 797 hgrc.close()
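# As the constructor docstring notes, an extra config option of the form
# 'foo.key=value' (passed with --extra-config-opt) ends up in the generated
# hgrc as:
#
#   [foo]
#   key=value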
798 798
799 799 def fail(self, msg):
800 800 # unittest differentiates between errored and failed.
801 801 # Failed is denoted by AssertionError (by default at least).
802 802 raise AssertionError(msg)
803 803
804 804 def _runcommand(self, cmd, env, normalizenewlines=False):
805 805 """Run command in a sub-process, capturing the output (stdout and
806 806 stderr).
807 807
808 808 Return a tuple (exitcode, output). output is None in debug mode.
809 809 """
810 810 if self._debug:
811 811 proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
812 812 env=env)
813 813 ret = proc.wait()
814 814 return (ret, None)
815 815
816 816 proc = Popen4(cmd, self._testtmp, self._timeout, env)
817 817 def cleanup():
818 818 terminate(proc)
819 819 ret = proc.wait()
820 820 if ret == 0:
821 821 ret = signal.SIGTERM << 8
822 822 killdaemons(env['DAEMON_PIDS'])
823 823 return ret
824 824
825 825 output = ''
826 826 proc.tochild.close()
827 827
828 828 try:
829 829 output = proc.fromchild.read()
830 830 except KeyboardInterrupt:
831 831 vlog('# Handling keyboard interrupt')
832 832 cleanup()
833 833 raise
834 834
835 835 ret = proc.wait()
836 836 if wifexited(ret):
837 837 ret = os.WEXITSTATUS(ret)
838 838
839 839 if proc.timeout:
840 840 ret = 'timeout'
841 841
842 842 if ret:
843 843 killdaemons(env['DAEMON_PIDS'])
844 844
845 845 for s, r in self._getreplacements():
846 846 output = re.sub(s, r, output)
847 847
848 848 if normalizenewlines:
849 849 output = output.replace('\r\n', '\n')
850 850
851 851 return ret, output.splitlines(True)
852 852
853 853 class PythonTest(Test):
854 854 """A Python-based test."""
855 855
856 856 @property
857 857 def refpath(self):
858 858 return os.path.join(self._testdir, b'%s.out' % self.bname)
859 859
860 860 def _run(self, env):
861 861 py3kswitch = self._py3kwarnings and b' -3' or b''
862 862 cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
863 863 vlog("# Running", cmd)
864 864 normalizenewlines = os.name == 'nt'
865 865 result = self._runcommand(cmd, env,
866 866 normalizenewlines=normalizenewlines)
867 867 if self._aborted:
868 868 raise KeyboardInterrupt()
869 869
870 870 return result
871 871
872 872 # This script may want to drop globs from lines matching these patterns on
873 873 # Windows, but check-code.py wants a glob on these lines unconditionally. Don't
874 874 # warn if that is the case for anything matching these lines.
875 875 checkcodeglobpats = [
876 876 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
877 877 re.compile(br'^moving \S+/.*[^)]$'),
878 878 re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
879 879 ]
880 880
881 881 bchr = chr
882 882 if PYTHON3:
883 883 bchr = lambda x: bytes([x])
884 884
885 885 class TTest(Test):
886 886 """A "t test" is a test backed by a .t file."""
887 887
888 888 SKIPPED_PREFIX = 'skipped: '
889 889 FAILED_PREFIX = 'hghave check failed: '
890 890 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
891 891
892 892 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
893 893 ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
894 894 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
895 895
896 896 @property
897 897 def refpath(self):
898 898 return os.path.join(self._testdir, self.bname)
899 899
900 900 def _run(self, env):
901 901 f = open(self.path, 'rb')
902 902 lines = f.readlines()
903 903 f.close()
904 904
905 905 salt, script, after, expected = self._parsetest(lines)
906 906
907 907 # Write out the generated script.
908 908 fname = b'%s.sh' % self._testtmp
909 909 f = open(fname, 'wb')
910 910 for l in script:
911 911 f.write(l)
912 912 f.close()
913 913
914 914 cmd = b'%s "%s"' % (self._shell, fname)
915 915 vlog("# Running", cmd)
916 916
917 917 exitcode, output = self._runcommand(cmd, env)
918 918
919 919 if self._aborted:
920 920 raise KeyboardInterrupt()
921 921
922 922 # Do not merge output if skipped. Return hghave message instead.
923 923 # Similarly, with --debug, output is None.
924 924 if exitcode == self.SKIPPED_STATUS or output is None:
925 925 return exitcode, output
926 926
927 927 return self._processoutput(exitcode, output, salt, after, expected)
928 928
929 929 def _hghave(self, reqs):
930 930 # TODO do something smarter when all other uses of hghave are gone.
931 931 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
932 932 tdir = runtestdir.replace(b'\\', b'/')
933 933 proc = Popen4(b'%s -c "%s/hghave %s"' %
934 934 (self._shell, tdir, b' '.join(reqs)),
935 935 self._testtmp, 0, self._getenv())
936 936 stdout, stderr = proc.communicate()
937 937 ret = proc.wait()
938 938 if wifexited(ret):
939 939 ret = os.WEXITSTATUS(ret)
940 940 if ret == 2:
941 941 print(stdout)
942 942 sys.exit(1)
943 943
944 944 if ret != 0:
945 945 return False, stdout
946 946
947 947 if 'slow' in reqs:
948 948 self._timeout = self._slowtimeout
949 949 return True, None
950 950
951 951 def _parsetest(self, lines):
952 952 # We generate a shell script which outputs unique markers to line
953 953 # up script results with our source. These markers include input
954 954 # line number and the last return code.
955 955 salt = b"SALT%d" % time.time()
956 956 def addsalt(line, inpython):
957 957 if inpython:
958 958 script.append(b'%s %d 0\n' % (salt, line))
959 959 else:
960 960 script.append(b'echo %s %d $?\n' % (salt, line))
961 961
962 962 script = []
963 963
964 964 # After we run the shell script, we re-unify the script output
965 965 # with non-active parts of the source, with synchronization by our
966 966 # SALT line number markers. The after table contains the non-active
967 967 # components, ordered by line number.
968 968 after = {}
969 969
970 970 # Expected shell script output.
971 971 expected = {}
972 972
973 973 pos = prepos = -1
974 974
975 975 # True or False when in a true or false conditional section
976 976 skipping = None
977 977
978 978 # We keep track of whether or not we're in a Python block so we
979 979 # can generate the surrounding doctest magic.
980 980 inpython = False
981 981
982 982 if self._debug:
983 983 script.append(b'set -x\n')
984 984 if os.getenv('MSYSTEM'):
985 985 script.append(b'alias pwd="pwd -W"\n')
986 986
987 987 for n, l in enumerate(lines):
988 988 if not l.endswith(b'\n'):
989 989 l += b'\n'
990 990 if l.startswith(b'#require'):
991 991 lsplit = l.split()
992 992 if len(lsplit) < 2 or lsplit[0] != b'#require':
993 993 after.setdefault(pos, []).append(' !!! invalid #require\n')
994 994 haveresult, message = self._hghave(lsplit[1:])
995 995 if not haveresult:
996 996 script = [b'echo "%s"\nexit 80\n' % message]
997 997 break
998 998 after.setdefault(pos, []).append(l)
999 999 elif l.startswith(b'#if'):
1000 1000 lsplit = l.split()
1001 1001 if len(lsplit) < 2 or lsplit[0] != b'#if':
1002 1002 after.setdefault(pos, []).append(' !!! invalid #if\n')
1003 1003 if skipping is not None:
1004 1004 after.setdefault(pos, []).append(' !!! nested #if\n')
1005 1005 skipping = not self._hghave(lsplit[1:])[0]
1006 1006 after.setdefault(pos, []).append(l)
1007 1007 elif l.startswith(b'#else'):
1008 1008 if skipping is None:
1009 1009 after.setdefault(pos, []).append(' !!! missing #if\n')
1010 1010 skipping = not skipping
1011 1011 after.setdefault(pos, []).append(l)
1012 1012 elif l.startswith(b'#endif'):
1013 1013 if skipping is None:
1014 1014 after.setdefault(pos, []).append(' !!! missing #if\n')
1015 1015 skipping = None
1016 1016 after.setdefault(pos, []).append(l)
1017 1017 elif skipping:
1018 1018 after.setdefault(pos, []).append(l)
1019 1019 elif l.startswith(b' >>> '): # python inlines
1020 1020 after.setdefault(pos, []).append(l)
1021 1021 prepos = pos
1022 1022 pos = n
1023 1023 if not inpython:
1024 1024 # We've just entered a Python block. Add the header.
1025 1025 inpython = True
1026 1026 addsalt(prepos, False) # Make sure we report the exit code.
1027 1027 script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
1028 1028 addsalt(n, True)
1029 1029 script.append(l[2:])
1030 1030 elif l.startswith(b' ... '): # python inlines
1031 1031 after.setdefault(prepos, []).append(l)
1032 1032 script.append(l[2:])
1033 1033 elif l.startswith(b' $ '): # commands
1034 1034 if inpython:
1035 1035 script.append(b'EOF\n')
1036 1036 inpython = False
1037 1037 after.setdefault(pos, []).append(l)
1038 1038 prepos = pos
1039 1039 pos = n
1040 1040 addsalt(n, False)
1041 1041 cmd = l[4:].split()
1042 1042 if len(cmd) == 2 and cmd[0] == b'cd':
1043 1043 l = b' $ cd %s || exit 1\n' % cmd[1]
1044 1044 script.append(l[4:])
1045 1045 elif l.startswith(b' > '): # continuations
1046 1046 after.setdefault(prepos, []).append(l)
1047 1047 script.append(l[4:])
1048 1048 elif l.startswith(b' '): # results
1049 1049 # Queue up a list of expected results.
1050 1050 expected.setdefault(pos, []).append(l[2:])
1051 1051 else:
1052 1052 if inpython:
1053 1053 script.append(b'EOF\n')
1054 1054 inpython = False
1055 1055 # Non-command/result. Queue up for merged output.
1056 1056 after.setdefault(pos, []).append(l)
1057 1057
1058 1058 if inpython:
1059 1059 script.append(b'EOF\n')
1060 1060 if skipping is not None:
1061 1061 after.setdefault(pos, []).append(' !!! missing #endif\n')
1062 1062 addsalt(n + 1, False)
1063 1063
1064 1064 return salt, script, after, expected
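# Roughly sketched, a two-line .t test such as:
#
#   $ echo hi
#   hi
#
# is turned by _parsetest() into a shell script along the lines of:
#
#   echo SALT<timestamp> 0 $?
#   echo hi
#   echo SALT<timestamp> 2 $?
#
# Each salt marker records the source line number and the exit status of the
# previous command, which is what lets _processoutput() below line the
# captured output back up with the 'expected' and 'after' tables.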
1065 1065
1066 1066 def _processoutput(self, exitcode, output, salt, after, expected):
1067 1067 # Merge the script output back into a unified test.
1068 1068 warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
1069 1069 if exitcode != 0:
1070 1070 warnonly = 3
1071 1071
1072 1072 pos = -1
1073 1073 postout = []
1074 1074 for l in output:
1075 1075 lout, lcmd = l, None
1076 1076 if salt in l:
1077 1077 lout, lcmd = l.split(salt, 1)
1078 1078
1079 1079 while lout:
1080 1080 if not lout.endswith(b'\n'):
1081 1081 lout += b' (no-eol)\n'
1082 1082
1083 1083 # Find the expected output at the current position.
1084 1084 el = None
1085 1085 if expected.get(pos, None):
1086 1086 el = expected[pos].pop(0)
1087 1087
1088 1088 r = TTest.linematch(el, lout)
1089 1089 if isinstance(r, str):
1090 1090 if r == '+glob':
1091 1091 lout = el[:-1] + ' (glob)\n'
1092 1092 r = '' # Warn only this line.
1093 1093 elif r == '-glob':
1094 1094 lout = ''.join(el.rsplit(' (glob)', 1))
1095 1095 r = '' # Warn only this line.
1096 1096 elif r == "retry":
1097 1097 postout.append(b' ' + el)
1098 1098 continue
1099 1099 else:
1100 1100 log('\ninfo, unknown linematch result: %r\n' % r)
1101 1101 r = False
1102 1102 if r:
1103 1103 postout.append(b' ' + el)
1104 1104 else:
1105 1105 if self.NEEDESCAPE(lout):
1106 1106 lout = TTest._stringescape(b'%s (esc)\n' %
1107 1107 lout.rstrip(b'\n'))
1108 1108 postout.append(b' ' + lout) # Let diff deal with it.
1109 1109 if r != '': # If line failed.
1110 1110 warnonly = 3 # for sure not
1111 1111 elif warnonly == 1: # Is "not yet" and line is warn only.
1112 1112 warnonly = 2 # Yes do warn.
1113 1113 break
1114 1114
1115 1115 # clean up any optional leftovers
1116 1116 while expected.get(pos, None):
1117 1117 el = expected[pos].pop(0)
1118 1118 if not el.endswith(b" (?)\n"):
1119 1119 expected[pos].insert(0, el)
1120 1120 break
1121 1121 postout.append(b' ' + el)
1122 1122
1123 1123 if lcmd:
1124 1124 # Add on last return code.
1125 1125 ret = int(lcmd.split()[1])
1126 1126 if ret != 0:
1127 1127 postout.append(b' [%d]\n' % ret)
1128 1128 if pos in after:
1129 1129 # Merge in non-active test bits.
1130 1130 postout += after.pop(pos)
1131 1131 pos = int(lcmd.split()[0])
1132 1132
1133 1133 if pos in after:
1134 1134 postout += after.pop(pos)
1135 1135
1136 1136 if warnonly == 2:
1137 1137 exitcode = False # Set exitcode to warned.
1138 1138
1139 1139 return exitcode, postout
1140 1140
1141 1141 @staticmethod
1142 1142 def rematch(el, l):
1143 1143 try:
1144 1144 # use \Z to ensure that the regex matches to the end of the string
1145 1145 if os.name == 'nt':
1146 1146 return re.match(el + br'\r?\n\Z', l)
1147 1147 return re.match(el + br'\n\Z', l)
1148 1148 except re.error:
1149 1149 # el is an invalid regex
1150 1150 return False
1151 1151
1152 1152 @staticmethod
1153 1153 def globmatch(el, l):
1154 1154 # The only supported special characters are * and ? plus / which also
1155 1155 # matches \ on windows. Escaping of these characters is supported.
1156 1156 if el + b'\n' == l:
1157 1157 if os.altsep:
1158 1158 # matching on "/" is not needed for this line
1159 1159 for pat in checkcodeglobpats:
1160 1160 if pat.match(el):
1161 1161 return True
1162 1162 return b'-glob'
1163 1163 return True
1164 1164 i, n = 0, len(el)
1165 1165 res = b''
1166 1166 while i < n:
1167 1167 c = el[i:i + 1]
1168 1168 i += 1
1169 1169 if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
1170 1170 res += el[i - 1:i + 1]
1171 1171 i += 1
1172 1172 elif c == b'*':
1173 1173 res += b'.*'
1174 1174 elif c == b'?':
1175 1175 res += b'.'
1176 1176 elif c == b'/' and os.altsep:
1177 1177 res += b'[/\\\\]'
1178 1178 else:
1179 1179 res += re.escape(c)
1180 1180 return TTest.rematch(res, l)
1181 1181
1182 1182 @staticmethod
1183 1183 def linematch(el, l):
1184 1184 retry = False
1185 1185 if el == l: # perfect match (fast)
1186 1186 return True
1187 1187 if el:
1188 1188 if el.endswith(b" (?)\n"):
1189 1189 retry = "retry"
1190 1190 el = el[:-5] + "\n"
1191 1191 if el.endswith(b" (esc)\n"):
1192 1192 if PYTHON3:
1193 1193 el = el[:-7].decode('unicode_escape') + '\n'
1194 1194 el = el.encode('utf-8')
1195 1195 else:
1196 1196 el = el[:-7].decode('string-escape') + '\n'
1197 1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1198 1198 return True
1199 1199 if el.endswith(b" (re)\n"):
1200 1200 return TTest.rematch(el[:-6], l) or retry
1201 1201 if el.endswith(b" (glob)\n"):
1202 1202 # ignore '(glob)' added to l by 'replacements'
1203 1203 if l.endswith(b" (glob)\n"):
1204 1204 l = l[:-8] + b"\n"
1205 1205 return TTest.globmatch(el[:-8], l)
1206 1206 if os.altsep and l.replace(b'\\', b'/') == el:
1207 1207 return b'+glob'
1208 1208 return retry
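# To summarize the matching rules above, expected output lines in a .t file
# may carry trailing annotations: ' (re)' makes the line an anchored regular
# expression, ' (glob)' allows '*' and '?' wildcards (with '/' also matching
# '\' on Windows), ' (esc)' marks a string-escaped line, and ' (?)' marks the
# expected line as optional, so the test does not fail when that line is
# missing from the actual output.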
1209 1209
1210 1210 @staticmethod
1211 1211 def parsehghaveoutput(lines):
1212 1212 '''Parse hghave log lines.
1213 1213
1214 1214 Return tuple of lists (missing, failed):
1215 1215 * the missing/unknown features
1216 1216 * the features for which existence check failed'''
1217 1217 missing = []
1218 1218 failed = []
1219 1219 for line in lines:
1220 1220 if line.startswith(TTest.SKIPPED_PREFIX):
1221 1221 line = line.splitlines()[0]
1222 1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1223 1223 elif line.startswith(TTest.FAILED_PREFIX):
1224 1224 line = line.splitlines()[0]
1225 1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1226 1226
1227 1227 return missing, failed
1228 1228
1229 1229 @staticmethod
1230 1230 def _escapef(m):
1231 1231 return TTest.ESCAPEMAP[m.group(0)]
1232 1232
1233 1233 @staticmethod
1234 1234 def _stringescape(s):
1235 1235 return TTest.ESCAPESUB(TTest._escapef, s)
1236 1236
1237 1237 iolock = threading.RLock()
1238 1238
1239 1239 class SkipTest(Exception):
1240 1240 """Raised to indicate that a test is to be skipped."""
1241 1241
1242 1242 class IgnoreTest(Exception):
1243 1243 """Raised to indicate that a test is to be ignored."""
1244 1244
1245 1245 class WarnTest(Exception):
1246 1246 """Raised to indicate that a test warned."""
1247 1247
1248 1248 class ReportedTest(Exception):
1249 1249 """Raised to indicate that a test already reported."""
1250 1250
1251 1251 class TestResult(unittest._TextTestResult):
1252 1252 """Holds results when executing via unittest."""
1253 1253 # Don't worry too much about accessing the non-public _TextTestResult.
1254 1254 # It is relatively common in Python testing tools.
1255 1255 def __init__(self, options, *args, **kwargs):
1256 1256 super(TestResult, self).__init__(*args, **kwargs)
1257 1257
1258 1258 self._options = options
1259 1259
1260 1260 # unittest.TestResult didn't have skipped until 2.7. We need to
1261 1261 # polyfill it.
1262 1262 self.skipped = []
1263 1263
1264 1264 # We have a custom "ignored" result that isn't present in any Python
1265 1265 # unittest implementation. It is very similar to skipped. It may make
1266 1266 # sense to map it into skip some day.
1267 1267 self.ignored = []
1268 1268
1269 1269 # We have a custom "warned" result that isn't present in any Python
1270 1270 # unittest implementation. It is very similar to failed. It may make
1271 1271 # sense to map it into fail some day.
1272 1272 self.warned = []
1273 1273
1274 1274 self.times = []
1275 1275 self._firststarttime = None
1276 1276 # Data stored for the benefit of generating xunit reports.
1277 1277 self.successes = []
1278 1278 self.faildata = {}
1279 1279
1280 1280 def addFailure(self, test, reason):
1281 1281 self.failures.append((test, reason))
1282 1282
1283 1283 if self._options.first:
1284 1284 self.stop()
1285 1285 else:
1286 1286 with iolock:
1287 1287 if reason == "timed out":
1288 1288 self.stream.write('t')
1289 1289 else:
1290 1290 if not self._options.nodiff:
1291 1291 self.stream.write('\nERROR: %s output changed\n' % test)
1292 1292 self.stream.write('!')
1293 1293
1294 1294 self.stream.flush()
1295 1295
1296 1296 def addSuccess(self, test):
1297 1297 with iolock:
1298 1298 super(TestResult, self).addSuccess(test)
1299 1299 self.successes.append(test)
1300 1300
1301 1301 def addError(self, test, err):
1302 1302 super(TestResult, self).addError(test, err)
1303 1303 if self._options.first:
1304 1304 self.stop()
1305 1305
1306 1306 # Polyfill.
1307 1307 def addSkip(self, test, reason):
1308 1308 self.skipped.append((test, reason))
1309 1309 with iolock:
1310 1310 if self.showAll:
1311 1311 self.stream.writeln('skipped %s' % reason)
1312 1312 else:
1313 1313 self.stream.write('s')
1314 1314 self.stream.flush()
1315 1315
1316 1316 def addIgnore(self, test, reason):
1317 1317 self.ignored.append((test, reason))
1318 1318 with iolock:
1319 1319 if self.showAll:
1320 1320 self.stream.writeln('ignored %s' % reason)
1321 1321 else:
1322 1322 if reason not in ('not retesting', "doesn't match keyword"):
1323 1323 self.stream.write('i')
1324 1324 else:
1325 1325 self.testsRun += 1
1326 1326 self.stream.flush()
1327 1327
1328 1328 def addWarn(self, test, reason):
1329 1329 self.warned.append((test, reason))
1330 1330
1331 1331 if self._options.first:
1332 1332 self.stop()
1333 1333
1334 1334 with iolock:
1335 1335 if self.showAll:
1336 1336 self.stream.writeln('warned %s' % reason)
1337 1337 else:
1338 1338 self.stream.write('~')
1339 1339 self.stream.flush()
1340 1340
1341 1341 def addOutputMismatch(self, test, ret, got, expected):
1342 1342 """Record a mismatch in test output for a particular test."""
1343 1343 if self.shouldStop:
1344 1344 # don't print, some other test case already failed and
1345 1345 # printed, we're just stale and probably failed due to our
1346 1346 # temp dir getting cleaned up.
1347 1347 return
1348 1348
1349 1349 accepted = False
1350 1350 failed = False
1351 1351 lines = []
1352 1352
1353 1353 with iolock:
1354 1354 if self._options.nodiff:
1355 1355 pass
1356 1356 elif self._options.view:
1357 1357 v = self._options.view
1358 1358 if PYTHON3:
1359 1359 v = _bytespath(v)
1360 1360 os.system(b"%s %s %s" %
1361 1361 (v, test.refpath, test.errpath))
1362 1362 else:
1363 1363 servefail, lines = getdiff(expected, got,
1364 1364 test.refpath, test.errpath)
1365 1365 if servefail:
1366 1366 self.addFailure(
1367 1367 test,
1368 1368 'server failed to start (HGPORT=%s)' % test._startport)
1369 1369 raise ReportedTest('server failed to start')
1370 1370 else:
1371 1371 self.stream.write('\n')
1372 1372 for line in lines:
1373 1373 if PYTHON3:
1374 1374 self.stream.flush()
1375 1375 self.stream.buffer.write(line)
1376 1376 self.stream.buffer.flush()
1377 1377 else:
1378 1378 self.stream.write(line)
1379 1379 self.stream.flush()
1380 1380
1381 1381 # handle interactive prompt without releasing iolock
1382 1382 if self._options.interactive:
1383 1383 self.stream.write('Accept this change? [n] ')
1384 1384 answer = sys.stdin.readline().strip()
1385 1385 if answer.lower() in ('y', 'yes'):
1386 1386 if test.name.endswith('.t'):
1387 1387 rename(test.errpath, test.path)
1388 1388 else:
1389 1389 rename(test.errpath, '%s.out' % test.path)
1390 1390 accepted = True
1391 1391 if not accepted and not failed:
1392 1392 self.faildata[test.name] = b''.join(lines)
1393 1393
1394 1394 return accepted
1395 1395
1396 1396 def startTest(self, test):
1397 1397 super(TestResult, self).startTest(test)
1398 1398
1399 1399 # os.times module computes the user time and system time spent by
1400 1400 # child's processes along with real elapsed time taken by a process.
1401 1401 # This module has one limitation. It can only work for Linux user
1402 1402 # and not for Windows.
1403 1403 test.started = os.times()
1404 1404 if self._firststarttime is None: # thread racy but irrelevant
1405 1405 self._firststarttime = test.started[4]
1406 1406
1407 1407 def stopTest(self, test, interrupted=False):
1408 1408 super(TestResult, self).stopTest(test)
1409 1409
1410 1410 test.stopped = os.times()
1411 1411
1412 1412 starttime = test.started
1413 1413 endtime = test.stopped
1414 1414 origin = self._firststarttime
1415 1415 self.times.append((test.name,
1416 1416 endtime[2] - starttime[2], # user space CPU time
1417 1417 endtime[3] - starttime[3], # sys space CPU time
1418 1418 endtime[4] - starttime[4], # real time
1419 1419 starttime[4] - origin, # start date in run context
1420 1420 endtime[4] - origin, # end date in run context
1421 1421 ))
1422 1422
1423 1423 if interrupted:
1424 1424 with iolock:
1425 1425 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
1426 1426 test.name, self.times[-1][3]))
1427 1427
1428 1428 class TestSuite(unittest.TestSuite):
1429 1429 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
1430 1430
1431 1431 def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
1432 1432 retest=False, keywords=None, loop=False, runs_per_test=1,
1433 1433 loadtest=None, showchannels=False,
1434 1434 *args, **kwargs):
1435 1435 """Create a new instance that can run tests with a configuration.
1436 1436
1437 1437 testdir specifies the directory where tests are executed from. This
1438 1438 is typically the ``tests`` directory from Mercurial's source
1439 1439 repository.
1440 1440
1441 1441 jobs specifies the number of jobs to run concurrently. Each test
1442 1442 executes on its own thread. Tests actually spawn new processes, so
1443 1443 state mutation should not be an issue.
1444 1444
1445 1445 If there is only one job, it will use the main thread.
1446 1446
1447 1447 whitelist and blacklist denote tests that have been whitelisted and
1448 1448 blacklisted, respectively. These arguments don't belong in TestSuite.
1449 1449 Instead, whitelist and blacklist should be handled by the thing that
1450 1450 populates the TestSuite with tests. They are present to preserve
1451 1451 backwards compatible behavior which reports skipped tests as part
1452 1452 of the results.
1453 1453
1454 1454 retest denotes whether to retest failed tests. This arguably belongs
1455 1455 outside of TestSuite.
1456 1456
1457 1457 keywords denotes key words that will be used to filter which tests
1458 1458 to execute. This arguably belongs outside of TestSuite.
1459 1459
1460 1460 loop denotes whether to loop over tests forever.
1461 1461 """
1462 1462 super(TestSuite, self).__init__(*args, **kwargs)
1463 1463
1464 1464 self._jobs = jobs
1465 1465 self._whitelist = whitelist
1466 1466 self._blacklist = blacklist
1467 1467 self._retest = retest
1468 1468 self._keywords = keywords
1469 1469 self._loop = loop
1470 1470 self._runs_per_test = runs_per_test
1471 1471 self._loadtest = loadtest
1472 1472 self._showchannels = showchannels
1473 1473
1474 1474 def run(self, result):
1475 1475 # We have a number of filters that need to be applied. We do this
1476 1476 # here instead of inside Test because it makes the running logic for
1477 1477 # Test simpler.
1478 1478 tests = []
1479 1479 num_tests = [0]
1480 1480 for test in self._tests:
1481 1481 def get():
1482 1482 num_tests[0] += 1
1483 1483 if getattr(test, 'should_reload', False):
1484 1484 return self._loadtest(test.bname, num_tests[0])
1485 1485 return test
1486 1486 if not os.path.exists(test.path):
1487 1487 result.addSkip(test, "Doesn't exist")
1488 1488 continue
1489 1489
1490 1490 if not (self._whitelist and test.name in self._whitelist):
1491 1491 if self._blacklist and test.bname in self._blacklist:
1492 1492 result.addSkip(test, 'blacklisted')
1493 1493 continue
1494 1494
1495 1495 if self._retest and not os.path.exists(test.errpath):
1496 1496 result.addIgnore(test, 'not retesting')
1497 1497 continue
1498 1498
1499 1499 if self._keywords:
1500 1500 f = open(test.path, 'rb')
1501 1501 t = f.read().lower() + test.bname.lower()
1502 1502 f.close()
1503 1503 ignored = False
1504 1504 for k in self._keywords.lower().split():
1505 1505 if k not in t:
1506 1506 result.addIgnore(test, "doesn't match keyword")
1507 1507 ignored = True
1508 1508 break
1509 1509
1510 1510 if ignored:
1511 1511 continue
1512 1512 for _ in xrange(self._runs_per_test):
1513 1513 tests.append(get())
1514 1514
1515 1515 runtests = list(tests)
1516 1516 done = queue.Queue()
1517 1517 running = 0
1518 1518
1519 1519 channels = [""] * self._jobs
1520 1520
1521 1521 def job(test, result):
1522 1522 for n, v in enumerate(channels):
1523 1523 if not v:
1524 1524 channel = n
1525 1525 break
1526 1526 channels[channel] = "=" + test.name[5:].split(".")[0]
1527 1527 try:
1528 1528 test(result)
1529 1529 channels[channel] = ''
1530 1530 done.put(None)
1531 1531 except KeyboardInterrupt:
1532 1532 channels[channel] = ''
1533 1533 except: # re-raises
1534 1534 done.put(('!', test, 'run-test raised an error, see traceback'))
1535 1535 raise
1536 1536
1537 1537 def stat():
1538 1538 count = 0
1539 1539 while channels:
1540 1540 d = '\n%03s ' % count
1541 1541 for n, v in enumerate(channels):
1542 1542 if v:
1543 1543 d += v[0]
1544 1544 channels[n] = v[1:] or '.'
1545 1545 else:
1546 1546 d += ' '
1547 1547 d += ' '
1548 1548 with iolock:
1549 1549 sys.stdout.write(d + ' ')
1550 1550 sys.stdout.flush()
1551 1551 for x in xrange(10):
1552 1552 if channels:
1553 1553 time.sleep(.1)
1554 1554 count += 1
1555 1555
1556 1556 stoppedearly = False
1557 1557
1558 1558 if self._showchannels:
1559 1559 statthread = threading.Thread(target=stat, name="stat")
1560 1560 statthread.start()
1561 1561
1562 1562 try:
1563 1563 while tests or running:
1564 1564 if not done.empty() or running == self._jobs or not tests:
1565 1565 try:
1566 1566 done.get(True, 1)
1567 1567 running -= 1
1568 1568 if result and result.shouldStop:
1569 1569 stoppedearly = True
1570 1570 break
1571 1571 except queue.Empty:
1572 1572 continue
1573 1573 if tests and not running == self._jobs:
1574 1574 test = tests.pop(0)
1575 1575 if self._loop:
1576 1576 if getattr(test, 'should_reload', False):
1577 1577 num_tests[0] += 1
1578 1578 tests.append(
1579 1579 self._loadtest(test.name, num_tests[0]))
1580 1580 else:
1581 1581 tests.append(test)
1582 1582 if self._jobs == 1:
1583 1583 job(test, result)
1584 1584 else:
1585 1585 t = threading.Thread(target=job, name=test.name,
1586 1586 args=(test, result))
1587 1587 t.start()
1588 1588 running += 1
1589 1589
1590 1590 # If we stop early we still need to wait on started tests to
1591 1591 # finish. Otherwise, there is a race between the test completing
1592 1592 # and the test's cleanup code running. This could result in the
1593 1593 # test being reported incorrectly.
1594 1594 if stoppedearly:
1595 1595 while running:
1596 1596 try:
1597 1597 done.get(True, 1)
1598 1598 running -= 1
1599 1599 except queue.Empty:
1600 1600 continue
1601 1601 except KeyboardInterrupt:
1602 1602 for test in runtests:
1603 1603 test.abort()
1604 1604
1605 1605 channels = []
1606 1606
1607 1607 return result
1608 1608
1609 1609 # Save the most recent 5 wall-clock runtimes of each test to a
1610 1610 # human-readable text file named .testtimes. Tests are sorted
1611 1611 # alphabetically, while times for each test are listed from oldest to
1612 1612 # newest.
1613 1613
1614 1614 def loadtimes(testdir):
1615 1615 times = []
1616 1616 try:
1617 1617 with open(os.path.join(testdir, '.testtimes')) as fp:
1618 1618 for line in fp:
1619 1619 ts = line.split()
1620 1620 times.append((ts[0], [float(t) for t in ts[1:]]))
1621 1621 except IOError as err:
1622 1622 if err.errno != errno.ENOENT:
1623 1623 raise
1624 1624 return times
1625 1625
1626 1626 def savetimes(testdir, result):
1627 1627 saved = dict(loadtimes(testdir))
1628 1628 maxruns = 5
1629 1629 skipped = set([str(t[0]) for t in result.skipped])
1630 1630 for tdata in result.times:
1631 1631 test, real = tdata[0], tdata[3]
1632 1632 if test not in skipped:
1633 1633 ts = saved.setdefault(test, [])
1634 1634 ts.append(real)
1635 1635 ts[:] = ts[-maxruns:]
1636 1636
1637 1637 fd, tmpname = tempfile.mkstemp(prefix='.testtimes',
1638 1638 dir=testdir, text=True)
1639 1639 with os.fdopen(fd, 'w') as fp:
1640 1640 for name, ts in sorted(saved.items()):
1641 1641 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
1642 1642 timepath = os.path.join(testdir, '.testtimes')
1643 1643 try:
1644 1644 os.unlink(timepath)
1645 1645 except OSError:
1646 1646 pass
1647 1647 try:
1648 1648 os.rename(tmpname, timepath)
1649 1649 except OSError:
1650 1650 pass
1651 1651
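The comment above describes the .testtimes layout; here is a minimal illustration (test names and timings are invented) of the file that loadtimes() and savetimes() round-trip: one line per test, the name followed by up to five wall-clock runtimes (%.3f), oldest first.

# Illustrative .testtimes content -- numbers and names are made up.
with open('.testtimes', 'w') as fp:
    fp.write('test-commit.t 2.104 2.311 1.998\n')
    fp.write('test-status.t 0.412 0.398 0.405 0.411 0.402\n')

# loadtimes('.') would then return:
# [('test-commit.t', [2.104, 2.311, 1.998]),
#  ('test-status.t', [0.412, 0.398, 0.405, 0.411, 0.402])]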
1652 1652 class TextTestRunner(unittest.TextTestRunner):
1653 1653 """Custom unittest test runner that uses appropriate settings."""
1654 1654
1655 1655 def __init__(self, runner, *args, **kwargs):
1656 1656 super(TextTestRunner, self).__init__(*args, **kwargs)
1657 1657
1658 1658 self._runner = runner
1659 1659
1660 1660 def run(self, test):
1661 1661 result = TestResult(self._runner.options, self.stream,
1662 1662 self.descriptions, self.verbosity)
1663 1663
1664 1664 test(result)
1665 1665
1666 1666 failed = len(result.failures)
1667 1667 warned = len(result.warned)
1668 1668 skipped = len(result.skipped)
1669 1669 ignored = len(result.ignored)
1670 1670
1671 1671 with iolock:
1672 1672 self.stream.writeln('')
1673 1673
1674 1674 if not self._runner.options.noskips:
1675 1675 for test, msg in result.skipped:
1676 1676 self.stream.writeln('Skipped %s: %s' % (test.name, msg))
1677 1677 for test, msg in result.warned:
1678 1678 self.stream.writeln('Warned %s: %s' % (test.name, msg))
1679 1679 for test, msg in result.failures:
1680 1680 self.stream.writeln('Failed %s: %s' % (test.name, msg))
1681 1681 for test, msg in result.errors:
1682 1682 self.stream.writeln('Errored %s: %s' % (test.name, msg))
1683 1683
1684 1684 if self._runner.options.xunit:
1685 1685 with open(self._runner.options.xunit, 'wb') as xuf:
1686 1686 timesd = dict((t[0], t[3]) for t in result.times)
1687 1687 doc = minidom.Document()
1688 1688 s = doc.createElement('testsuite')
1689 1689 s.setAttribute('name', 'run-tests')
1690 1690 s.setAttribute('tests', str(result.testsRun))
1691 1691 s.setAttribute('errors', "0") # TODO
1692 1692 s.setAttribute('failures', str(failed))
1693 1693 s.setAttribute('skipped', str(skipped + ignored))
1694 1694 doc.appendChild(s)
1695 1695 for tc in result.successes:
1696 1696 t = doc.createElement('testcase')
1697 1697 t.setAttribute('name', tc.name)
1698 1698 t.setAttribute('time', '%.3f' % timesd[tc.name])
1699 1699 s.appendChild(t)
1700 1700 for tc, err in sorted(result.faildata.items()):
1701 1701 t = doc.createElement('testcase')
1702 1702 t.setAttribute('name', tc)
1703 1703 t.setAttribute('time', '%.3f' % timesd[tc])
1704 1704 # createCDATASection expects a unicode or it will
1705 1705 # convert using default conversion rules, which will
1706 1706 # fail if string isn't ASCII.
1707 1707 err = cdatasafe(err).decode('utf-8', 'replace')
1708 1708 cd = doc.createCDATASection(err)
1709 1709 t.appendChild(cd)
1710 1710 s.appendChild(t)
1711 1711 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
1712 1712
1713 1713 if self._runner.options.json:
1714 1714 if json is None:
1715 1715 raise ImportError("json module not installed")
1716 1716 jsonpath = os.path.join(self._runner._testdir, 'report.json')
1717 1717 with open(jsonpath, 'w') as fp:
1718 1718 timesd = {}
1719 1719 for tdata in result.times:
1720 1720 test = tdata[0]
1721 1721 timesd[test] = tdata[1:]
1722 1722
1723 1723 outcome = {}
1724 1724 groups = [('success', ((tc, None)
1725 1725 for tc in result.successes)),
1726 1726 ('failure', result.failures),
1727 1727 ('skip', result.skipped)]
1728 1728 for res, testcases in groups:
1729 1729 for tc, __ in testcases:
1730 if tc.name in timesd:
1730 1731 tres = {'result': res,
1731 1732 'time': ('%0.3f' % timesd[tc.name][2]),
1732 1733 'cuser': ('%0.3f' % timesd[tc.name][0]),
1733 1734 'csys': ('%0.3f' % timesd[tc.name][1]),
1734 1735 'start': ('%0.3f' % timesd[tc.name][3]),
1735 1736 'end': ('%0.3f' % timesd[tc.name][4]),
1736 'diff': result.faildata.get(tc.name, ''),
1737 'diff': result.faildata.get(tc.name,
1738 ''),
1737 1739 }
1740 else:
1741 # blacklisted test
1742 tres = {'result': res}
1743
1738 1744 outcome[tc.name] = tres
1739 1745 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
1740 1746 fp.writelines(("testreport =", jsonout))
1741 1747
1742 1748 self._runner._checkhglib('Tested')
1743 1749
1744 1750 savetimes(self._runner._testdir, result)
1745 1751 self.stream.writeln(
1746 1752 '# Ran %d tests, %d skipped, %d warned, %d failed.'
1747 1753 % (result.testsRun,
1748 1754 skipped + ignored, warned, failed))
1749 1755 if failed:
1750 1756 self.stream.writeln('python hash seed: %s' %
1751 1757 os.environ['PYTHONHASHSEED'])
1752 1758 if self._runner.options.time:
1753 1759 self.printtimes(result.times)
1754 1760
1755 1761 return result
1756 1762
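The "tc.name in timesd" guard in the JSON branch above matters because blacklisted tests are skipped before they ever run, so result.times never records an entry for them; without the guard, writing the report with --json and --blacklist combined would hit a KeyError. A small self-contained sketch of that logic, using hypothetical data:

# Hypothetical data mirroring the structures used above: one test that
# actually ran (and so has timing data) and one blacklisted test that
# was skipped before running.
timesd = {'test-success.t': (0.01, 0.00, 0.42, 0.00, 0.42)}
skipped = [('test-success.t', None), ('test-failure.t', 'blacklisted')]

outcome = {}
for name, _msg in skipped:
    if name in timesd:            # ran: full timing record available
        outcome[name] = {'result': 'skip',
                         'time': '%0.3f' % timesd[name][2]}
    else:                         # blacklisted: only the result is known
        outcome[name] = {'result': 'skip'}

# Indexing timesd['test-failure.t'] unconditionally would raise KeyError,
# which is exactly what the guard above avoids.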
1757 1763 def printtimes(self, times):
1758 1764 # iolock held by run
1759 1765 self.stream.writeln('# Producing time report')
1760 1766 times.sort(key=lambda t: (t[3]))
1761 1767 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1762 1768 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1763 1769 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1764 1770 for tdata in times:
1765 1771 test = tdata[0]
1766 1772 cuser, csys, real, start, end = tdata[1:6]
1767 1773 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1768 1774
1769 1775 class TestRunner(object):
1770 1776 """Holds context for executing tests.
1771 1777
1772 1778 Tests rely on a lot of state. This object holds it for them.
1773 1779 """
1774 1780
1775 1781 # Programs required to run tests.
1776 1782 REQUIREDTOOLS = [
1777 1783 os.path.basename(_bytespath(sys.executable)),
1778 1784 b'diff',
1779 1785 b'grep',
1780 1786 b'unzip',
1781 1787 b'gunzip',
1782 1788 b'bunzip2',
1783 1789 b'sed',
1784 1790 ]
1785 1791
1786 1792 # Maps file extensions to test class.
1787 1793 TESTTYPES = [
1788 1794 (b'.py', PythonTest),
1789 1795 (b'.t', TTest),
1790 1796 ]
1791 1797
1792 1798 def __init__(self):
1793 1799 self.options = None
1794 1800 self._hgroot = None
1795 1801 self._testdir = None
1796 1802 self._hgtmp = None
1797 1803 self._installdir = None
1798 1804 self._bindir = None
1799 1805 self._tmpbinddir = None
1800 1806 self._pythondir = None
1801 1807 self._coveragefile = None
1802 1808 self._createdfiles = []
1803 1809 self._hgpath = None
1804 1810 self._portoffset = 0
1805 1811 self._ports = {}
1806 1812
1807 1813 def run(self, args, parser=None):
1808 1814 """Run the test suite."""
1809 1815 oldmask = os.umask(0o22)
1810 1816 try:
1811 1817 parser = parser or getparser()
1812 1818 options, args = parseargs(args, parser)
1813 1819 # positional arguments are paths to test files to run, so
1814 1820 # we make sure they're all bytestrings
1815 1821 args = [_bytespath(a) for a in args]
1816 1822 self.options = options
1817 1823
1818 1824 self._checktools()
1819 1825 tests = self.findtests(args)
1820 1826 if options.profile_runner:
1821 1827 import statprof
1822 1828 statprof.start()
1823 1829 result = self._run(tests)
1824 1830 if options.profile_runner:
1825 1831 statprof.stop()
1826 1832 statprof.display()
1827 1833 return result
1828 1834
1829 1835 finally:
1830 1836 os.umask(oldmask)
1831 1837
1832 1838 def _run(self, tests):
1833 1839 if self.options.random:
1834 1840 random.shuffle(tests)
1835 1841 else:
1836 1842 # keywords for slow tests
1837 1843 slow = {b'svn': 10,
1838 1844 b'cvs': 10,
1839 1845 b'hghave': 10,
1840 1846 b'largefiles-update': 10,
1841 1847 b'run-tests': 10,
1842 1848 b'corruption': 10,
1843 1849 b'race': 10,
1844 1850 b'i18n': 10,
1845 1851 b'check': 100,
1846 1852 b'gendoc': 100,
1847 1853 b'contrib-perf': 200,
1848 1854 }
1849 1855 perf = {}
1850 1856 def sortkey(f):
1851 1857 # run largest tests first, as they tend to take the longest
1852 1858 try:
1853 1859 return perf[f]
1854 1860 except KeyError:
1855 1861 try:
1856 1862 val = -os.stat(f).st_size
1857 1863 except OSError as e:
1858 1864 if e.errno != errno.ENOENT:
1859 1865 raise
1860 1866 perf[f] = -1e9 # file does not exist, report it early
1861 1867 return -1e9
1862 1868 for kw, mul in slow.items():
1863 1869 if kw in f:
1864 1870 val *= mul
1865 1871 if f.endswith('.py'):
1866 1872 val /= 10.0
1867 1873 perf[f] = val / 1000.0
1868 1874 return perf[f]
1869 1875 tests.sort(key=sortkey)
1870 1876
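To make the weighting above concrete, here is a rough rehearsal of sortkey() with invented file sizes (the real function stats the files and caches results in perf); more negative keys sort first, so large tests matching a "slow" keyword are scheduled earliest.

# Invented sizes; only two of the slow keywords are kept for brevity.
slow = {b'run-tests': 10, b'check': 100}

def weight(name, size):
    val = -size
    for kw, mul in slow.items():
        if kw in name:
            val *= mul
    if name.endswith(b'.py'):
        val /= 10.0
    return val / 1000.0

print(weight(b'test-run-tests.t', 20000))   # -200.0 -> starts early
print(weight(b'test-something.py', 20000))  # -2.0   -> starts later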
1871 1877 self._testdir = osenvironb[b'TESTDIR'] = getattr(
1872 1878 os, 'getcwdb', os.getcwd)()
1873 1879
1874 1880 if 'PYTHONHASHSEED' not in os.environ:
1875 1881 # use a random python hash seed all the time
1876 1882 # we do the randomness ourself to know what seed is used
1877 1883 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
1878 1884
1879 1885 if self.options.tmpdir:
1880 1886 self.options.keep_tmpdir = True
1881 1887 tmpdir = _bytespath(self.options.tmpdir)
1882 1888 if os.path.exists(tmpdir):
1883 1889 # Meaning of tmpdir has changed since 1.3: we used to create
1884 1890 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
1885 1891 # tmpdir already exists.
1886 1892 print("error: temp dir %r already exists" % tmpdir)
1887 1893 return 1
1888 1894
1889 1895 # Automatically removing tmpdir sounds convenient, but could
1890 1896 # really annoy anyone in the habit of using "--tmpdir=/tmp"
1891 1897 # or "--tmpdir=$HOME".
1892 1898 #vlog("# Removing temp dir", tmpdir)
1893 1899 #shutil.rmtree(tmpdir)
1894 1900 os.makedirs(tmpdir)
1895 1901 else:
1896 1902 d = None
1897 1903 if os.name == 'nt':
1898 1904 # without this, we get the default temp dir location, but
1899 1905 # in all lowercase, which causes troubles with paths (issue3490)
1900 1906 d = osenvironb.get(b'TMP', None)
1901 1907 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
1902 1908
1903 1909 self._hgtmp = osenvironb[b'HGTMP'] = (
1904 1910 os.path.realpath(tmpdir))
1905 1911
1906 1912 if self.options.with_hg:
1907 1913 self._installdir = None
1908 1914 whg = self.options.with_hg
1909 1915 # If --with-hg is not specified, we have bytes already,
1910 1916 # but if it was specified in python3 we get a str, so we
1911 1917 # have to encode it back into a bytes.
1912 1918 if PYTHON3:
1913 1919 if not isinstance(whg, bytes):
1914 1920 whg = _bytespath(whg)
1915 1921 self._bindir = os.path.dirname(os.path.realpath(whg))
1916 1922 assert isinstance(self._bindir, bytes)
1917 1923 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
1918 1924 os.makedirs(self._tmpbindir)
1919 1925
1920 1926 # This looks redundant with how Python initializes sys.path from
1921 1927 # the location of the script being executed. Needed because the
1922 1928 # "hg" specified by --with-hg is not the only Python script
1923 1929 # executed in the test suite that needs to import 'mercurial'
1924 1930 # ... which means it's not really redundant at all.
1925 1931 self._pythondir = self._bindir
1926 1932 else:
1927 1933 self._installdir = os.path.join(self._hgtmp, b"install")
1928 1934 self._bindir = osenvironb[b"BINDIR"] = \
1929 1935 os.path.join(self._installdir, b"bin")
1930 1936 self._tmpbindir = self._bindir
1931 1937 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
1932 1938
1933 1939 osenvironb[b"BINDIR"] = self._bindir
1934 1940 osenvironb[b"PYTHON"] = PYTHON
1935 1941
1936 1942 fileb = _bytespath(__file__)
1937 1943 runtestdir = os.path.abspath(os.path.dirname(fileb))
1938 1944 osenvironb[b'RUNTESTDIR'] = runtestdir
1939 1945 if PYTHON3:
1940 1946 sepb = _bytespath(os.pathsep)
1941 1947 else:
1942 1948 sepb = os.pathsep
1943 1949 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
1944 1950 if os.path.islink(__file__):
1945 1951 # test helper will likely be at the end of the symlink
1946 1952 realfile = os.path.realpath(fileb)
1947 1953 realdir = os.path.abspath(os.path.dirname(realfile))
1948 1954 path.insert(2, realdir)
1949 1955 if self._testdir != runtestdir:
1950 1956 path = [self._testdir] + path
1951 1957 if self._tmpbindir != self._bindir:
1952 1958 path = [self._tmpbindir] + path
1953 1959 osenvironb[b"PATH"] = sepb.join(path)
1954 1960
1955 1961 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
1956 1962 # can run .../tests/run-tests.py test-foo where test-foo
1957 1963 # adds an extension to HGRC. Also include run-test.py directory to
1958 1964 # import modules like heredoctest.
1959 1965 pypath = [self._pythondir, self._testdir, runtestdir]
1960 1966 # We have to augment PYTHONPATH, rather than simply replacing
1961 1967 # it, in case external libraries are only available via current
1962 1968 # PYTHONPATH. (In particular, the Subversion bindings on OS X
1963 1969 # are in /opt/subversion.)
1964 1970 oldpypath = osenvironb.get(IMPL_PATH)
1965 1971 if oldpypath:
1966 1972 pypath.append(oldpypath)
1967 1973 osenvironb[IMPL_PATH] = sepb.join(pypath)
1968 1974
1969 1975 if self.options.pure:
1970 1976 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
1971 1977
1972 1978 if self.options.allow_slow_tests:
1973 1979 os.environ["HGTEST_SLOW"] = "slow"
1974 1980 elif 'HGTEST_SLOW' in os.environ:
1975 1981 del os.environ['HGTEST_SLOW']
1976 1982
1977 1983 self._coveragefile = os.path.join(self._testdir, b'.coverage')
1978 1984
1979 1985 vlog("# Using TESTDIR", self._testdir)
1980 1986 vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
1981 1987 vlog("# Using HGTMP", self._hgtmp)
1982 1988 vlog("# Using PATH", os.environ["PATH"])
1983 1989 vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])
1984 1990
1985 1991 try:
1986 1992 return self._runtests(tests) or 0
1987 1993 finally:
1988 1994 time.sleep(.1)
1989 1995 self._cleanup()
1990 1996
1991 1997 def findtests(self, args):
1992 1998 """Finds possible test files from arguments.
1993 1999
1994 2000 If you wish to inject custom tests into the test harness, this would
1995 2001 be a good function to monkeypatch or override in a derived class.
1996 2002 """
1997 2003 if not args:
1998 2004 if self.options.changed:
1999 2005 proc = Popen4('hg st --rev "%s" -man0 .' %
2000 2006 self.options.changed, None, 0)
2001 2007 stdout, stderr = proc.communicate()
2002 2008 args = stdout.strip(b'\0').split(b'\0')
2003 2009 else:
2004 2010 args = os.listdir(b'.')
2005 2011
2006 2012 return [t for t in args
2007 2013 if os.path.basename(t).startswith(b'test-')
2008 2014 and (t.endswith(b'.py') or t.endswith(b'.t'))]
2009 2015
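As the docstring notes, findtests() is the natural override point for third-party harnesses. A hypothetical sketch (the subclass name and the extra directory are invented; TestRunner, os and sys are the ones already used by this script):

class ExtensionTestRunner(TestRunner):
    def findtests(self, args):
        tests = super(ExtensionTestRunner, self).findtests(args)
        extradir = b'../myext/tests'   # made-up location
        if os.path.isdir(extradir):
            tests += [os.path.join(extradir, t)
                      for t in os.listdir(extradir)
                      if t.startswith(b'test-') and t.endswith(b'.t')]
        return tests

# sys.exit(ExtensionTestRunner().run(sys.argv[1:]))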
2010 2016 def _runtests(self, tests):
2011 2017 try:
2012 2018 if self._installdir:
2013 2019 self._installhg()
2014 2020 self._checkhglib("Testing")
2015 2021 else:
2016 2022 self._usecorrectpython()
2017 2023
2018 2024 if self.options.restart:
2019 2025 orig = list(tests)
2020 2026 while tests:
2021 2027 if os.path.exists(tests[0] + ".err"):
2022 2028 break
2023 2029 tests.pop(0)
2024 2030 if not tests:
2025 2031 print("running all tests")
2026 2032 tests = orig
2027 2033
2028 2034 tests = [self._gettest(t, i) for i, t in enumerate(tests)]
2029 2035
2030 2036 failed = False
2031 2037 warned = False
2032 2038 kws = self.options.keywords
2033 2039 if kws is not None and PYTHON3:
2034 2040 kws = kws.encode('utf-8')
2035 2041
2036 2042 suite = TestSuite(self._testdir,
2037 2043 jobs=self.options.jobs,
2038 2044 whitelist=self.options.whitelisted,
2039 2045 blacklist=self.options.blacklist,
2040 2046 retest=self.options.retest,
2041 2047 keywords=kws,
2042 2048 loop=self.options.loop,
2043 2049 runs_per_test=self.options.runs_per_test,
2044 2050 showchannels=self.options.showchannels,
2045 2051 tests=tests, loadtest=self._gettest)
2046 2052 verbosity = 1
2047 2053 if self.options.verbose:
2048 2054 verbosity = 2
2049 2055 runner = TextTestRunner(self, verbosity=verbosity)
2050 2056 result = runner.run(suite)
2051 2057
2052 2058 if result.failures:
2053 2059 failed = True
2054 2060 if result.warned:
2055 2061 warned = True
2056 2062
2057 2063 if self.options.anycoverage:
2058 2064 self._outputcoverage()
2059 2065 except KeyboardInterrupt:
2060 2066 failed = True
2061 2067 print("\ninterrupted!")
2062 2068
2063 2069 if failed:
2064 2070 return 1
2065 2071 if warned:
2066 2072 return 80
2067 2073
2068 2074 def _getport(self, count):
2069 2075 port = self._ports.get(count) # do we have a cached entry?
2070 2076 if port is None:
2071 2077 portneeded = 3
2072 2078 # after 100 tries we just give up and let the test report the failure
2073 2079 for tries in xrange(100):
2074 2080 allfree = True
2075 2081 port = self.options.port + self._portoffset
2076 2082 for idx in xrange(portneeded):
2077 2083 if not checkportisavailable(port + idx):
2078 2084 allfree = False
2079 2085 break
2080 2086 self._portoffset += portneeded
2081 2087 if allfree:
2082 2088 break
2083 2089 self._ports[count] = port
2084 2090 return port
2085 2091
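_getport() hands each test a block of three consecutive ports and relies on checkportisavailable() (defined earlier in this script) to probe them. Below is a minimal, self-contained sketch of one way such a probe can work; it is an illustration under that assumption, not the harness's own helper.

import errno
import socket

def probe_port(port, host='localhost'):
    """Return True if the port can be bound locally (assumed free)."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((host, port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (errno.EADDRINUSE, errno.EADDRNOTAVAIL):
            raise
        return False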
2086 2092 def _gettest(self, test, count):
2087 2093 """Obtain a Test by looking at its filename.
2088 2094
2089 2095 Returns a Test instance. The Test may not be runnable if it doesn't
2090 2096 map to a known type.
2091 2097 """
2092 2098 lctest = test.lower()
2093 2099 testcls = Test
2094 2100
2095 2101 for ext, cls in self.TESTTYPES:
2096 2102 if lctest.endswith(ext):
2097 2103 testcls = cls
2098 2104 break
2099 2105
2100 2106 refpath = os.path.join(self._testdir, test)
2101 2107 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2102 2108
2103 2109 t = testcls(refpath, tmpdir,
2104 2110 keeptmpdir=self.options.keep_tmpdir,
2105 2111 debug=self.options.debug,
2106 2112 timeout=self.options.timeout,
2107 2113 startport=self._getport(count),
2108 2114 extraconfigopts=self.options.extra_config_opt,
2109 2115 py3kwarnings=self.options.py3k_warnings,
2110 2116 shell=self.options.shell)
2111 2117 t.should_reload = True
2112 2118 return t
2113 2119
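TESTTYPES is the other extension seam that _gettest() consults. A hypothetical addition of a new extension-to-class mapping (ShellTest is an invented placeholder, not part of run-tests.py):

class ShellTest(Test):
    """Invented placeholder; a real implementation would override _run()
    to execute the script and collect its output."""

class ShTestRunner(TestRunner):
    # Recognize ".sh" scripts in addition to the stock ".py"/".t" types.
    TESTTYPES = TestRunner.TESTTYPES + [(b'.sh', ShellTest)]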
2114 2120 def _cleanup(self):
2115 2121 """Clean up state from this test invocation."""
2116 2122
2117 2123 if self.options.keep_tmpdir:
2118 2124 return
2119 2125
2120 2126 vlog("# Cleaning up HGTMP", self._hgtmp)
2121 2127 shutil.rmtree(self._hgtmp, True)
2122 2128 for f in self._createdfiles:
2123 2129 try:
2124 2130 os.remove(f)
2125 2131 except OSError:
2126 2132 pass
2127 2133
2128 2134 def _usecorrectpython(self):
2129 2135 """Configure the environment to use the appropriate Python in tests."""
2130 2136 # Tests must use the same interpreter as us or bad things will happen.
2131 2137 pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
2132 2138 if getattr(os, 'symlink', None):
2133 2139 vlog("# Making python executable in test path a symlink to '%s'" %
2134 2140 sys.executable)
2135 2141 mypython = os.path.join(self._tmpbindir, pyexename)
2136 2142 try:
2137 2143 if os.readlink(mypython) == sys.executable:
2138 2144 return
2139 2145 os.unlink(mypython)
2140 2146 except OSError as err:
2141 2147 if err.errno != errno.ENOENT:
2142 2148 raise
2143 2149 if self._findprogram(pyexename) != sys.executable:
2144 2150 try:
2145 2151 os.symlink(sys.executable, mypython)
2146 2152 self._createdfiles.append(mypython)
2147 2153 except OSError as err:
2148 2154 # child processes may race, which is harmless
2149 2155 if err.errno != errno.EEXIST:
2150 2156 raise
2151 2157 else:
2152 2158 exedir, exename = os.path.split(sys.executable)
2153 2159 vlog("# Modifying search path to find %s as %s in '%s'" %
2154 2160 (exename, pyexename, exedir))
2155 2161 path = os.environ['PATH'].split(os.pathsep)
2156 2162 while exedir in path:
2157 2163 path.remove(exedir)
2158 2164 os.environ['PATH'] = os.pathsep.join([exedir] + path)
2159 2165 if not self._findprogram(pyexename):
2160 2166 print("WARNING: Cannot find %s in search path" % pyexename)
2161 2167
2162 2168 def _installhg(self):
2163 2169 """Install hg into the test environment.
2164 2170
2165 2171 This will also configure hg with the appropriate testing settings.
2166 2172 """
2167 2173 vlog("# Performing temporary installation of HG")
2168 2174 installerrs = os.path.join(b"tests", b"install.err")
2169 2175 compiler = ''
2170 2176 if self.options.compiler:
2171 2177 compiler = '--compiler ' + self.options.compiler
2172 2178 if self.options.pure:
2173 2179 pure = b"--pure"
2174 2180 else:
2175 2181 pure = b""
2176 2182 py3 = ''
2177 2183
2178 2184 # Run installer in hg root
2179 2185 script = os.path.realpath(sys.argv[0])
2180 2186 exe = sys.executable
2181 2187 if PYTHON3:
2182 2188 py3 = b'--c2to3'
2183 2189 compiler = _bytespath(compiler)
2184 2190 script = _bytespath(script)
2185 2191 exe = _bytespath(exe)
2186 2192 hgroot = os.path.dirname(os.path.dirname(script))
2187 2193 self._hgroot = hgroot
2188 2194 os.chdir(hgroot)
2189 2195 nohome = b'--home=""'
2190 2196 if os.name == 'nt':
2191 2197 # The --home="" trick works only on OS where os.sep == '/'
2192 2198 # because of a distutils convert_path() fast-path. Avoid it at
2193 2199 # least on Windows for now, deal with .pydistutils.cfg bugs
2194 2200 # when they happen.
2195 2201 nohome = b''
2196 2202 cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
2197 2203 b' build %(compiler)s --build-base="%(base)s"'
2198 2204 b' install --force --prefix="%(prefix)s"'
2199 2205 b' --install-lib="%(libdir)s"'
2200 2206 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
2201 2207 % {b'exe': exe, b'py3': py3, b'pure': pure,
2202 2208 b'compiler': compiler,
2203 2209 b'base': os.path.join(self._hgtmp, b"build"),
2204 2210 b'prefix': self._installdir, b'libdir': self._pythondir,
2205 2211 b'bindir': self._bindir,
2206 2212 b'nohome': nohome, b'logfile': installerrs})
2207 2213
2208 2214 # setuptools requires install directories to exist.
2209 2215 def makedirs(p):
2210 2216 try:
2211 2217 os.makedirs(p)
2212 2218 except OSError as e:
2213 2219 if e.errno != errno.EEXIST:
2214 2220 raise
2215 2221 makedirs(self._pythondir)
2216 2222 makedirs(self._bindir)
2217 2223
2218 2224 vlog("# Running", cmd)
2219 2225 if os.system(cmd) == 0:
2220 2226 if not self.options.verbose:
2221 2227 try:
2222 2228 os.remove(installerrs)
2223 2229 except OSError as e:
2224 2230 if e.errno != errno.ENOENT:
2225 2231 raise
2226 2232 else:
2227 2233 f = open(installerrs, 'rb')
2228 2234 for line in f:
2229 2235 if PYTHON3:
2230 2236 sys.stdout.buffer.write(line)
2231 2237 else:
2232 2238 sys.stdout.write(line)
2233 2239 f.close()
2234 2240 sys.exit(1)
2235 2241 os.chdir(self._testdir)
2236 2242
2237 2243 self._usecorrectpython()
2238 2244
2239 2245 if self.options.py3k_warnings and not self.options.anycoverage:
2240 2246 vlog("# Updating hg command to enable Py3k Warnings switch")
2241 2247 f = open(os.path.join(self._bindir, 'hg'), 'rb')
2242 2248 lines = [line.rstrip() for line in f]
2243 2249 lines[0] += ' -3'
2244 2250 f.close()
2245 2251 f = open(os.path.join(self._bindir, 'hg'), 'wb')
2246 2252 for line in lines:
2247 2253 f.write(line + '\n')
2248 2254 f.close()
2249 2255
2250 2256 hgbat = os.path.join(self._bindir, b'hg.bat')
2251 2257 if os.path.isfile(hgbat):
2252 2258 # hg.bat expects to be put in bin/scripts while run-tests.py
2253 2259 # installation layout put it in bin/ directly. Fix it
2254 2260 f = open(hgbat, 'rb')
2255 2261 data = f.read()
2256 2262 f.close()
2257 2263 if b'"%~dp0..\python" "%~dp0hg" %*' in data:
2258 2264 data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
2259 2265 b'"%~dp0python" "%~dp0hg" %*')
2260 2266 f = open(hgbat, 'wb')
2261 2267 f.write(data)
2262 2268 f.close()
2263 2269 else:
2264 2270 print('WARNING: cannot fix hg.bat reference to python.exe')
2265 2271
2266 2272 if self.options.anycoverage:
2267 2273 custom = os.path.join(self._testdir, 'sitecustomize.py')
2268 2274 target = os.path.join(self._pythondir, 'sitecustomize.py')
2269 2275 vlog('# Installing coverage trigger to %s' % target)
2270 2276 shutil.copyfile(custom, target)
2271 2277 rc = os.path.join(self._testdir, '.coveragerc')
2272 2278 vlog('# Installing coverage rc to %s' % rc)
2273 2279 os.environ['COVERAGE_PROCESS_START'] = rc
2274 2280 covdir = os.path.join(self._installdir, '..', 'coverage')
2275 2281 try:
2276 2282 os.mkdir(covdir)
2277 2283 except OSError as e:
2278 2284 if e.errno != errno.EEXIST:
2279 2285 raise
2280 2286
2281 2287 os.environ['COVERAGE_DIR'] = covdir
2282 2288
2283 2289 def _checkhglib(self, verb):
2284 2290 """Ensure that the 'mercurial' package imported by python is
2285 2291 the one we expect it to be. If not, print a warning to stderr."""
2286 2292 if ((self._bindir == self._pythondir) and
2287 2293 (self._bindir != self._tmpbindir)):
2288 2294 # The pythondir has been inferred from --with-hg flag.
2289 2295 # We cannot expect anything sensible here.
2290 2296 return
2291 2297 expecthg = os.path.join(self._pythondir, b'mercurial')
2292 2298 actualhg = self._gethgpath()
2293 2299 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2294 2300 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2295 2301 ' (expected %s)\n'
2296 2302 % (verb, actualhg, expecthg))
2297 2303 def _gethgpath(self):
2298 2304 """Return the path to the mercurial package that is actually found by
2299 2305 the current Python interpreter."""
2300 2306 if self._hgpath is not None:
2301 2307 return self._hgpath
2302 2308
2303 2309 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2304 2310 cmd = cmd % PYTHON
2305 2311 if PYTHON3:
2306 2312 cmd = _strpath(cmd)
2307 2313 pipe = os.popen(cmd)
2308 2314 try:
2309 2315 self._hgpath = _bytespath(pipe.read().strip())
2310 2316 finally:
2311 2317 pipe.close()
2312 2318
2313 2319 return self._hgpath
2314 2320
2315 2321 def _outputcoverage(self):
2316 2322 """Produce code coverage output."""
2317 2323 from coverage import coverage
2318 2324
2319 2325 vlog('# Producing coverage report')
2320 2326 # chdir is the easiest way to get short, relative paths in the
2321 2327 # output.
2322 2328 os.chdir(self._hgroot)
2323 2329 covdir = os.path.join(self._installdir, '..', 'coverage')
2324 2330 cov = coverage(data_file=os.path.join(covdir, 'cov'))
2325 2331
2326 2332 # Map install directory paths back to source directory.
2327 2333 cov.config.paths['srcdir'] = ['.', self._pythondir]
2328 2334
2329 2335 cov.combine()
2330 2336
2331 2337 omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
2332 2338 cov.report(ignore_errors=True, omit=omit)
2333 2339
2334 2340 if self.options.htmlcov:
2335 2341 htmldir = os.path.join(self._testdir, 'htmlcov')
2336 2342 cov.html_report(directory=htmldir, omit=omit)
2337 2343 if self.options.annotate:
2338 2344 adir = os.path.join(self._testdir, 'annotated')
2339 2345 if not os.path.isdir(adir):
2340 2346 os.mkdir(adir)
2341 2347 cov.annotate(directory=adir, omit=omit)
2342 2348
2343 2349 def _findprogram(self, program):
2344 2350 """Search PATH for a executable program"""
2345 2351 dpb = _bytespath(os.defpath)
2346 2352 sepb = _bytespath(os.pathsep)
2347 2353 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2348 2354 name = os.path.join(p, program)
2349 2355 if os.name == 'nt' or os.access(name, os.X_OK):
2350 2356 return name
2351 2357 return None
2352 2358
2353 2359 def _checktools(self):
2354 2360 """Ensure tools required to run tests are present."""
2355 2361 for p in self.REQUIREDTOOLS:
2356 2362 if os.name == 'nt' and not p.endswith('.exe'):
2357 2363 p += '.exe'
2358 2364 found = self._findprogram(p)
2359 2365 if found:
2360 2366 vlog("# Found prerequisite", p, "at", found)
2361 2367 else:
2362 2368 print("WARNING: Did not find prerequisite tool: %s " % p)
2363 2369
2364 2370 if __name__ == '__main__':
2365 2371 runner = TestRunner()
2366 2372
2367 2373 try:
2368 2374 import msvcrt
2369 2375 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
2370 2376 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
2371 2377 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
2372 2378 except ImportError:
2373 2379 pass
2374 2380
2375 2381 sys.exit(runner.run(sys.argv[1:]))
@@ -1,695 +1,703 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test with install
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 Define a helper to avoid the install step
18 18 =============
19 19 $ rt()
20 20 > {
21 21 > run-tests.py --with-hg=`which hg` "$@"
22 22 > }
23 23
24 24 a successful test
25 25 =======================
26 26
27 27 $ cat > test-success.t << EOF
28 28 > $ echo babar
29 29 > babar
30 30 > $ echo xyzzy
31 31 > never happens (?)
32 32 > xyzzy
33 33 > nor this (?)
34 34 > EOF
35 35
36 36 $ rt
37 37 .
38 38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
39 39
40 40 failing test
41 41 ==================
42 42
43 43 $ cat > test-failure.t << EOF
44 44 > $ echo babar
45 45 > rataxes
46 46 > This is a noop statement so that
47 47 > this test is still more bytes than success.
48 48 > EOF
49 49
50 50 >>> fh = open('test-failure-unicode.t', 'wb')
51 51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
52 52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
53 53
54 54 $ rt
55 55
56 56 --- $TESTTMP/test-failure.t
57 57 +++ $TESTTMP/test-failure.t.err
58 58 @@ -1,4 +1,4 @@
59 59 $ echo babar
60 60 - rataxes
61 61 + babar
62 62 This is a noop statement so that
63 63 this test is still more bytes than success.
64 64
65 65 ERROR: test-failure.t output changed
66 66 !.
67 67 --- $TESTTMP/test-failure-unicode.t
68 68 +++ $TESTTMP/test-failure-unicode.t.err
69 69 @@ -1,2 +1,2 @@
70 70 $ echo babar\xce\xb1 (esc)
71 71 - l\xce\xb5\xce\xb5t (esc)
72 72 + babar\xce\xb1 (esc)
73 73
74 74 ERROR: test-failure-unicode.t output changed
75 75 !
76 76 Failed test-failure.t: output changed
77 77 Failed test-failure-unicode.t: output changed
78 78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
79 79 python hash seed: * (glob)
80 80 [1]
81 81
82 82 test --xunit support
83 83 $ rt --xunit=xunit.xml
84 84
85 85 --- $TESTTMP/test-failure.t
86 86 +++ $TESTTMP/test-failure.t.err
87 87 @@ -1,4 +1,4 @@
88 88 $ echo babar
89 89 - rataxes
90 90 + babar
91 91 This is a noop statement so that
92 92 this test is still more bytes than success.
93 93
94 94 ERROR: test-failure.t output changed
95 95 !.
96 96 --- $TESTTMP/test-failure-unicode.t
97 97 +++ $TESTTMP/test-failure-unicode.t.err
98 98 @@ -1,2 +1,2 @@
99 99 $ echo babar\xce\xb1 (esc)
100 100 - l\xce\xb5\xce\xb5t (esc)
101 101 + babar\xce\xb1 (esc)
102 102
103 103 ERROR: test-failure-unicode.t output changed
104 104 !
105 105 Failed test-failure.t: output changed
106 106 Failed test-failure-unicode.t: output changed
107 107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
108 108 python hash seed: * (glob)
109 109 [1]
110 110 $ cat xunit.xml
111 111 <?xml version="1.0" encoding="utf-8"?>
112 112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
113 113 <testcase name="test-success.t" time="*"/> (glob)
114 114 <testcase name="test-failure-unicode.t" time="*"> (glob)
115 115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
116 116 +++ $TESTTMP/test-failure-unicode.t.err
117 117 @@ -1,2 +1,2 @@
118 118 $ echo babar\xce\xb1 (esc)
119 119 - l\xce\xb5\xce\xb5t (esc)
120 120 + babar\xce\xb1 (esc)
121 121 ]]> </testcase>
122 122 <testcase name="test-failure.t" time="*"> (glob)
123 123 <![CDATA[--- $TESTTMP/test-failure.t
124 124 +++ $TESTTMP/test-failure.t.err
125 125 @@ -1,4 +1,4 @@
126 126 $ echo babar
127 127 - rataxes
128 128 + babar
129 129 This is a noop statement so that
130 130 this test is still more bytes than success.
131 131 ]]> </testcase>
132 132 </testsuite>
133 133
134 134 $ rm test-failure-unicode.t
135 135
136 136 test for --retest
137 137 ====================
138 138
139 139 $ rt --retest
140 140
141 141 --- $TESTTMP/test-failure.t
142 142 +++ $TESTTMP/test-failure.t.err
143 143 @@ -1,4 +1,4 @@
144 144 $ echo babar
145 145 - rataxes
146 146 + babar
147 147 This is a noop statement so that
148 148 this test is still more bytes than success.
149 149
150 150 ERROR: test-failure.t output changed
151 151 !
152 152 Failed test-failure.t: output changed
153 153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
154 154 python hash seed: * (glob)
155 155 [1]
156 156
157 157 Selecting Tests To Run
158 158 ======================
159 159
160 160 successful
161 161
162 162 $ rt test-success.t
163 163 .
164 164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
165 165
166 166 success w/ keyword
167 167 $ rt -k xyzzy
168 168 .
169 169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
170 170
171 171 failed
172 172
173 173 $ rt test-failure.t
174 174
175 175 --- $TESTTMP/test-failure.t
176 176 +++ $TESTTMP/test-failure.t.err
177 177 @@ -1,4 +1,4 @@
178 178 $ echo babar
179 179 - rataxes
180 180 + babar
181 181 This is a noop statement so that
182 182 this test is still more bytes than success.
183 183
184 184 ERROR: test-failure.t output changed
185 185 !
186 186 Failed test-failure.t: output changed
187 187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
188 188 python hash seed: * (glob)
189 189 [1]
190 190
191 191 failure w/ keyword
192 192 $ rt -k rataxes
193 193
194 194 --- $TESTTMP/test-failure.t
195 195 +++ $TESTTMP/test-failure.t.err
196 196 @@ -1,4 +1,4 @@
197 197 $ echo babar
198 198 - rataxes
199 199 + babar
200 200 This is a noop statement so that
201 201 this test is still more bytes than success.
202 202
203 203 ERROR: test-failure.t output changed
204 204 !
205 205 Failed test-failure.t: output changed
206 206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
207 207 python hash seed: * (glob)
208 208 [1]
209 209
210 210 Verify that when a process fails to start we show a useful message
211 211 ==================================================================
212 212
213 213 $ cat > test-serve-fail.t <<EOF
214 214 > $ echo 'abort: child process failed to start blah'
215 215 > EOF
216 216 $ rt test-serve-fail.t
217 217
218 218 ERROR: test-serve-fail.t output changed
219 219 !
220 220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
221 221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
222 222 python hash seed: * (glob)
223 223 [1]
224 224 $ rm test-serve-fail.t
225 225
226 226 Verify that we can try other ports
227 227 ===================================
228 228 $ hg init inuse
229 229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
230 230 $ cat blocks.pid >> $DAEMON_PIDS
231 231 $ cat > test-serve-inuse.t <<EOF
232 232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
233 233 > $ cat hg.pid >> \$DAEMON_PIDS
234 234 > EOF
235 235 $ rt test-serve-inuse.t
236 236 .
237 237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
238 238 $ rm test-serve-inuse.t
239 239
240 240 Running In Debug Mode
241 241 ======================
242 242
243 243 $ rt --debug 2>&1 | grep -v pwd
244 244 + echo *SALT* 0 0 (glob)
245 245 *SALT* 0 0 (glob)
246 246 + echo babar
247 247 babar
248 248 + echo *SALT* 4 0 (glob)
249 249 *SALT* 4 0 (glob)
250 250 *+ echo *SALT* 0 0 (glob)
251 251 *SALT* 0 0 (glob)
252 252 + echo babar
253 253 babar
254 254 + echo *SALT* 2 0 (glob)
255 255 *SALT* 2 0 (glob)
256 256 + echo xyzzy
257 257 xyzzy
258 258 + echo *SALT* 6 0 (glob)
259 259 *SALT* 6 0 (glob)
260 260 .
261 261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
262 262
263 263 Parallel runs
264 264 ==============
265 265
266 266 (duplicate the failing test to get predictable output)
267 267 $ cp test-failure.t test-failure-copy.t
268 268
269 269 $ rt --jobs 2 test-failure*.t -n
270 270 !!
271 271 Failed test-failure*.t: output changed (glob)
272 272 Failed test-failure*.t: output changed (glob)
273 273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
274 274 python hash seed: * (glob)
275 275 [1]
276 276
277 277 failures in parallel with --first should only print one failure
278 278 >>> f = open('test-nothing.t', 'w')
279 279 >>> f.write('foo\n' * 1024) and None
280 280 >>> f.write(' $ sleep 1') and None
281 281 $ rt --jobs 2 --first
282 282
283 283 --- $TESTTMP/test-failure*.t (glob)
284 284 +++ $TESTTMP/test-failure*.t.err (glob)
285 285 @@ -1,4 +1,4 @@
286 286 $ echo babar
287 287 - rataxes
288 288 + babar
289 289 This is a noop statement so that
290 290 this test is still more bytes than success.
291 291
292 292 Failed test-failure*.t: output changed (glob)
293 293 Failed test-nothing.t: output changed
294 294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
295 295 python hash seed: * (glob)
296 296 [1]
297 297
298 298
299 299 (delete the duplicated test file)
300 300 $ rm test-failure-copy.t test-nothing.t
301 301
302 302
303 303 Interactive run
304 304 ===============
305 305
306 306 (backup the failing test)
307 307 $ cp test-failure.t backup
308 308
309 309 Refuse the fix
310 310
311 311 $ echo 'n' | rt -i
312 312
313 313 --- $TESTTMP/test-failure.t
314 314 +++ $TESTTMP/test-failure.t.err
315 315 @@ -1,4 +1,4 @@
316 316 $ echo babar
317 317 - rataxes
318 318 + babar
319 319 This is a noop statement so that
320 320 this test is still more bytes than success.
321 321 Accept this change? [n]
322 322 ERROR: test-failure.t output changed
323 323 !.
324 324 Failed test-failure.t: output changed
325 325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
326 326 python hash seed: * (glob)
327 327 [1]
328 328
329 329 $ cat test-failure.t
330 330 $ echo babar
331 331 rataxes
332 332 This is a noop statement so that
333 333 this test is still more bytes than success.
334 334
335 335 Interactive with custom view
336 336
337 337 $ echo 'n' | rt -i --view echo
338 338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
339 339 Accept this change? [n]* (glob)
340 340 ERROR: test-failure.t output changed
341 341 !.
342 342 Failed test-failure.t: output changed
343 343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
344 344 python hash seed: * (glob)
345 345 [1]
346 346
347 347 View the fix
348 348
349 349 $ echo 'y' | rt --view echo
350 350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
351 351
352 352 ERROR: test-failure.t output changed
353 353 !.
354 354 Failed test-failure.t: output changed
355 355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
356 356 python hash seed: * (glob)
357 357 [1]
358 358
359 359 Accept the fix
360 360
361 361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
362 362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
363 363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
364 364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
365 365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
366 366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
367 367 $ echo 'y' | rt -i 2>&1
368 368
369 369 --- $TESTTMP/test-failure.t
370 370 +++ $TESTTMP/test-failure.t.err
371 371 @@ -1,9 +1,9 @@
372 372 $ echo babar
373 373 - rataxes
374 374 + babar
375 375 This is a noop statement so that
376 376 this test is still more bytes than success.
377 377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
378 378 - saved backup bundle to $TESTTMP/foo.hg
379 379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
380 380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
381 381 saved backup bundle to $TESTTMP/foo.hg* (glob)
382 382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
383 383 Accept this change? [n] ..
384 384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
385 385
386 386 $ sed -e 's,(glob)$,&<,g' test-failure.t
387 387 $ echo babar
388 388 babar
389 389 This is a noop statement so that
390 390 this test is still more bytes than success.
391 391 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
392 392 saved backup bundle to $TESTTMP/foo.hg (glob)<
393 393 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
394 394 saved backup bundle to $TESTTMP/foo.hg (glob)<
395 395 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
396 396 saved backup bundle to $TESTTMP/*.hg (glob)<
397 397
398 398 (reinstall)
399 399 $ mv backup test-failure.t
400 400
401 401 No Diff
402 402 ===============
403 403
404 404 $ rt --nodiff
405 405 !.
406 406 Failed test-failure.t: output changed
407 407 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
408 408 python hash seed: * (glob)
409 409 [1]
410 410
411 411 test --tmpdir support
412 412 $ rt --tmpdir=$TESTTMP/keep test-success.t
413 413
414 414 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
415 415 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
416 416 .
417 417 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
418 418
419 419 timeouts
420 420 ========
421 421 $ cat > test-timeout.t <<EOF
422 422 > $ sleep 2
423 423 > $ echo pass
424 424 > pass
425 425 > EOF
426 426 > echo '#require slow' > test-slow-timeout.t
427 427 > cat test-timeout.t >> test-slow-timeout.t
428 428 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
429 429 st
430 430 Skipped test-slow-timeout.t: missing feature: allow slow tests
431 431 Failed test-timeout.t: timed out
432 432 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
433 433 python hash seed: * (glob)
434 434 [1]
435 435 $ rt --timeout=1 --slowtimeout=3 \
436 436 > test-timeout.t test-slow-timeout.t --allow-slow-tests
437 437 .t
438 438 Failed test-timeout.t: timed out
439 439 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
440 440 python hash seed: * (glob)
441 441 [1]
442 442 $ rm test-timeout.t test-slow-timeout.t
443 443
444 444 test for --time
445 445 ==================
446 446
447 447 $ rt test-success.t --time
448 448 .
449 449 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
450 450 # Producing time report
451 451 start end cuser csys real Test
452 452 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
453 453
454 454 test for --time with --job enabled
455 455 ====================================
456 456
457 457 $ rt test-success.t --time --jobs 2
458 458 .
459 459 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
460 460 # Producing time report
461 461 start end cuser csys real Test
462 462 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
463 463
464 464 Skips
465 465 ================
466 466 $ cat > test-skip.t <<EOF
467 467 > $ echo xyzzy
468 468 > #require false
469 469 > EOF
470 470 $ rt --nodiff
471 471 !.s
472 472 Skipped test-skip.t: missing feature: nail clipper
473 473 Failed test-failure.t: output changed
474 474 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
475 475 python hash seed: * (glob)
476 476 [1]
477 477
478 478 $ rt --keyword xyzzy
479 479 .s
480 480 Skipped test-skip.t: missing feature: nail clipper
481 481 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
482 482
483 483 Skips with xml
484 484 $ rt --keyword xyzzy \
485 485 > --xunit=xunit.xml
486 486 .s
487 487 Skipped test-skip.t: missing feature: nail clipper
488 488 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
489 489 $ cat xunit.xml
490 490 <?xml version="1.0" encoding="utf-8"?>
491 491 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
492 492 <testcase name="test-success.t" time="*"/> (glob)
493 493 </testsuite>
494 494
495 495 Missing skips or blacklisted skips don't count as executed:
496 496 $ echo test-failure.t > blacklist
497 $ rt --blacklist=blacklist \
497 $ rt --blacklist=blacklist --json\
498 498 > test-failure.t test-bogus.t
499 499 ss
500 500 Skipped test-bogus.t: Doesn't exist
501 501 Skipped test-failure.t: blacklisted
502 502 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
503
503 $ cat report.json
504 testreport ={
505 "test-bogus.t": {
506 "result": "skip"
507 },
508 "test-failure.t": {
509 "result": "skip"
510 }
511 } (no-eol)
504 512 #if json
505 513
506 514 test for --json
507 515 ==================
508 516
509 517 $ rt --json
510 518
511 519 --- $TESTTMP/test-failure.t
512 520 +++ $TESTTMP/test-failure.t.err
513 521 @@ -1,4 +1,4 @@
514 522 $ echo babar
515 523 - rataxes
516 524 + babar
517 525 This is a noop statement so that
518 526 this test is still more bytes than success.
519 527
520 528 ERROR: test-failure.t output changed
521 529 !.s
522 530 Skipped test-skip.t: missing feature: nail clipper
523 531 Failed test-failure.t: output changed
524 532 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
525 533 python hash seed: * (glob)
526 534 [1]
527 535
528 536 $ cat report.json
529 537 testreport ={
530 538 "test-failure.t": [\{] (re)
531 539 "csys": "\s*[\d\.]{4,5}", ? (re)
532 540 "cuser": "\s*[\d\.]{4,5}", ? (re)
533 541 "diff": "---.+\+\+\+.+", ? (re)
534 542 "end": "\s*[\d\.]{4,5}", ? (re)
535 543 "result": "failure", ? (re)
536 544 "start": "\s*[\d\.]{4,5}", ? (re)
537 545 "time": "\s*[\d\.]{4,5}" (re)
538 546 }, ? (re)
539 547 "test-skip.t": {
540 548 "csys": "\s*[\d\.]{4,5}", ? (re)
541 549 "cuser": "\s*[\d\.]{4,5}", ? (re)
542 550 "diff": "", ? (re)
543 551 "end": "\s*[\d\.]{4,5}", ? (re)
544 552 "result": "skip", ? (re)
545 553 "start": "\s*[\d\.]{4,5}", ? (re)
546 554 "time": "\s*[\d\.]{4,5}" (re)
547 555 }, ? (re)
548 556 "test-success.t": [\{] (re)
549 557 "csys": "\s*[\d\.]{4,5}", ? (re)
550 558 "cuser": "\s*[\d\.]{4,5}", ? (re)
551 559 "diff": "", ? (re)
552 560 "end": "\s*[\d\.]{4,5}", ? (re)
553 561 "result": "success", ? (re)
554 562 "start": "\s*[\d\.]{4,5}", ? (re)
555 563 "time": "\s*[\d\.]{4,5}" (re)
556 564 }
557 565 } (no-eol)
558 566
559 567 Test that failed test accepted through interactive are properly reported:
560 568
561 569 $ cp test-failure.t backup
562 570 $ echo y | rt --json -i
563 571
564 572 --- $TESTTMP/test-failure.t
565 573 +++ $TESTTMP/test-failure.t.err
566 574 @@ -1,4 +1,4 @@
567 575 $ echo babar
568 576 - rataxes
569 577 + babar
570 578 This is a noop statement so that
571 579 this test is still more bytes than success.
572 580 Accept this change? [n] ..s
573 581 Skipped test-skip.t: missing feature: nail clipper
574 582 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
575 583
576 584 $ cat report.json
577 585 testreport ={
578 586 "test-failure.t": [\{] (re)
579 587 "csys": "\s*[\d\.]{4,5}", ? (re)
580 588 "cuser": "\s*[\d\.]{4,5}", ? (re)
581 589 "diff": "", ? (re)
582 590 "end": "\s*[\d\.]{4,5}", ? (re)
583 591 "result": "success", ? (re)
584 592 "start": "\s*[\d\.]{4,5}", ? (re)
585 593 "time": "\s*[\d\.]{4,5}" (re)
586 594 }, ? (re)
587 595 "test-skip.t": {
588 596 "csys": "\s*[\d\.]{4,5}", ? (re)
589 597 "cuser": "\s*[\d\.]{4,5}", ? (re)
590 598 "diff": "", ? (re)
591 599 "end": "\s*[\d\.]{4,5}", ? (re)
592 600 "result": "skip", ? (re)
593 601 "start": "\s*[\d\.]{4,5}", ? (re)
594 602 "time": "\s*[\d\.]{4,5}" (re)
595 603 }, ? (re)
596 604 "test-success.t": [\{] (re)
597 605 "csys": "\s*[\d\.]{4,5}", ? (re)
598 606 "cuser": "\s*[\d\.]{4,5}", ? (re)
599 607 "diff": "", ? (re)
600 608 "end": "\s*[\d\.]{4,5}", ? (re)
601 609 "result": "success", ? (re)
602 610 "start": "\s*[\d\.]{4,5}", ? (re)
603 611 "time": "\s*[\d\.]{4,5}" (re)
604 612 }
605 613 } (no-eol)
606 614 $ mv backup test-failure.t
607 615
608 616 #endif
609 617
610 618 backslash on end of line with glob matching is handled properly
611 619
612 620 $ cat > test-glob-backslash.t << EOF
613 621 > $ echo 'foo bar \\'
614 622 > foo * \ (glob)
615 623 > EOF
616 624
617 625 $ rt test-glob-backslash.t
618 626 .
619 627 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
620 628
621 629 $ rm -f test-glob-backslash.t
622 630
623 631 Test reusability for third party tools
624 632 ======================================
625 633
626 634 $ mkdir "$TESTTMP"/anothertests
627 635 $ cd "$TESTTMP"/anothertests
628 636
629 637 test that `run-tests.py` can execute hghave, even if it runs not in
630 638 Mercurial source tree.
631 639
632 640 $ cat > test-hghave.t <<EOF
633 641 > #require true
634 642 > $ echo foo
635 643 > foo
636 644 > EOF
637 645 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
638 646 .
639 647 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
640 648
641 649 test that RUNTESTDIR refers the directory, in which `run-tests.py` now
642 650 running is placed.
643 651
644 652 $ cat > test-runtestdir.t <<EOF
645 653 > - $TESTDIR, in which test-run-tests.t is placed
646 654 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
647 655 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
648 656 >
649 657 > #if windows
650 658 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
651 659 > #else
652 660 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
653 661 > #endif
654 662 > $ test "\$RUNTESTDIR" = "$TESTDIR"
655 663 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
656 664 > #!/usr/bin/env python
657 665 > #
658 666 > # check-code - a style and portability checker for Mercurial
659 667 > EOF
660 668 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
661 669 .
662 670 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
663 671
664 672 #if execbit
665 673
666 674 test that TESTDIR is referred in PATH
667 675
668 676 $ cat > custom-command.sh <<EOF
669 677 > #!/bin/sh
670 678 > echo "hello world"
671 679 > EOF
672 680 $ chmod +x custom-command.sh
673 681 $ cat > test-testdir-path.t <<EOF
674 682 > $ custom-command.sh
675 683 > hello world
676 684 > EOF
677 685 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
678 686 .
679 687 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
680 688
681 689 #endif
682 690
683 691 test support for --allow-slow-tests
684 692 $ cat > test-very-slow-test.t <<EOF
685 693 > #require slow
686 694 > $ echo pass
687 695 > pass
688 696 > EOF
689 697 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
690 698 s
691 699 Skipped test-very-slow-test.t: missing feature: allow slow tests
692 700 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
693 701 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
694 702 .
695 703 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.