##// END OF EJS Templates
run-tests: fix get port to try differing ports...
timeless -
r27602:67aa88e0 default
parent child Browse files
Show More
@@ -1,2322 +1,2322
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import print_function
47 47
48 48 from distutils import version
49 49 import difflib
50 50 import errno
51 51 import optparse
52 52 import os
53 53 import shutil
54 54 import subprocess
55 55 import signal
56 56 import socket
57 57 import sys
58 58 import tempfile
59 59 import time
60 60 import random
61 61 import re
62 62 import threading
63 63 import killdaemons as killmod
64 64 try:
65 65 import Queue as queue
66 66 except ImportError:
67 67 import queue
68 68 from xml.dom import minidom
69 69 import unittest
70 70
# os.environb (bytes-keyed environment) only exists on Python 3; fall back
# to os.environ so the same name works on Python 2.
osenvironb = getattr(os, 'environb', os.environ)

# json is stdlib since 2.6; very old interpreters may have simplejson
# instead.  json stays None when neither is importable; users must check.
try:
    import json
except ImportError:
    try:
        import simplejson as json
    except ImportError:
        json = None

# Serializes subprocess creation across worker threads (see Popen4).
processlock = threading.Lock()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range # we use xrange in one place, and we'd rather not use range
    def _bytespath(p):
        # Convert a native (unicode) path string to bytes.
        return p.encode('utf-8')

    def _strpath(p):
        # Convert a bytes path back to a native string.
        return p.decode('utf-8')

elif sys.version_info >= (3, 0, 0):
    # Python 3.0-3.4 are explicitly unsupported.
    print('%s is only supported on Python 3.5+ and 2.6-2.7, not %s' %
          (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3])))
    sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _bytespath(p):
        return p

    _strpath = _bytespath

# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)
110 110
def checkportisavailable(port):
    """Return True if `port` seems free to bind on localhost.

    A port that is already bound (EADDRINUSE) yields False; any other
    socket error is unexpected and re-raised.
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if exc.errno != errno.EADDRINUSE:
            raise
        return False
    finally:
        # Always release the probe socket; the original code leaked it
        # whenever bind() failed.
        if s is not None:
            s.close()
122 122
# On POSIX, close inherited file descriptors in children.
closefds = os.name == 'posix'
def Popen4(cmd, wd, timeout, env=None):
    """Spawn `cmd` through the shell in directory `wd`.

    Returns the Popen object augmented with popen2-style aliases
    (fromchild/tochild/childerr).  If `timeout` is non-zero, a watchdog
    thread terminates the process after `timeout` seconds and sets
    p.timeout = True.
    """
    processlock.acquire()
    try:
        p = subprocess.Popen(cmd, shell=True, bufsize=-1, cwd=wd, env=env,
                             close_fds=closefds,
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    finally:
        # The original held the lock forever if Popen() raised, which
        # would deadlock every subsequent caller.
        processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:
        def t():
            # Poll until the deadline; if the child is still running,
            # mark the timeout and kill it.
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)
        threading.Thread(target=t).start()

    return p
148 148
# Interpreter running this script, as bytes with forward slashes (shell
# command lines are assembled with '/' even on Windows).
PYTHON = _bytespath(sys.executable.replace('\\', '/'))
# Environment variable controlling the module search path of the hg under
# test; Jython spells it differently.
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

# Option defaults: option name -> (environment override, fallback value).
defaults = {
    'jobs': ('HGTEST_JOBS', 1),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}
161 161
def parselistfiles(files, listtype, warn=True):
    """Parse "list files" (e.g. blacklist/whitelist) into a dict.

    Each file holds one test name per line; '#' starts a comment and
    blank lines are ignored.  Returns a dict mapping each entry (bytes)
    to the filename it came from.  Missing files are skipped, with a
    warning unless `warn` is False.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # Close the handle even if parsing raises partway; the original
        # leaked it on any exception while reading.
        try:
            for line in f:
                # Strip comments and surrounding whitespace.
                line = line.split(b'#', 1)[0].strip()
                if line:
                    entries[line] = filename
        finally:
            f.close()
    return entries
182 182
def getparser():
    """Obtain the OptionParser used by the CLI.

    Defaults come from the module-level `defaults` table, with each value
    overridable through its HGTEST_* environment variable.
    """
    parser = optparse.OptionParser("%prog [options] [tests]")

    # keep these sorted
    parser.add_option("--blacklist", action="append",
        help="skip tests listed in the specified blacklist file")
    parser.add_option("--whitelist", action="append",
        help="always run tests listed in the specified whitelist file")
    parser.add_option("--changed", type="string",
        help="run tests that are changed in parent rev or working directory")
    parser.add_option("-C", "--annotate", action="store_true",
        help="output files annotated with coverage")
    parser.add_option("-c", "--cover", action="store_true",
        help="print a test coverage report")
    parser.add_option("-d", "--debug", action="store_true",
        help="debug mode: write output of test scripts to console"
             " rather than capturing and diffing it (disables timeout)")
    parser.add_option("-f", "--first", action="store_true",
        help="exit on the first test failure")
    parser.add_option("-H", "--htmlcov", action="store_true",
        help="create an HTML report of the coverage of the files")
    parser.add_option("-i", "--interactive", action="store_true",
        help="prompt to accept changed output")
    parser.add_option("-j", "--jobs", type="int",
        help="number of jobs to run in parallel"
             " (default: $%s or %d)" % defaults['jobs'])
    parser.add_option("--keep-tmpdir", action="store_true",
        help="keep temporary directory after running tests")
    parser.add_option("-k", "--keywords",
        help="run tests matching keywords")
    parser.add_option("-l", "--local", action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg")
    parser.add_option("--loop", action="store_true",
        help="loop tests repeatedly")
    parser.add_option("--runs-per-test", type="int", dest="runs_per_test",
        help="run each test N times (default=1)", default=1)
    parser.add_option("-n", "--nodiff", action="store_true",
        help="skip showing test changes")
    parser.add_option("-p", "--port", type="int",
        help="port on which servers should listen"
             " (default: $%s or %d)" % defaults['port'])
    parser.add_option("--compiler", type="string",
        help="compiler to build with")
    parser.add_option("--pure", action="store_true",
        help="use pure Python code instead of C extensions")
    parser.add_option("-R", "--restart", action="store_true",
        help="restart at last error")
    parser.add_option("-r", "--retest", action="store_true",
        help="retest failed tests")
    parser.add_option("-S", "--noskips", action="store_true",
        help="don't report skip tests verbosely")
    parser.add_option("--shell", type="string",
        help="shell to use (default: $%s or %s)" % defaults['shell'])
    parser.add_option("-t", "--timeout", type="int",
        help="kill errant tests after TIMEOUT seconds"
             " (default: $%s or %d)" % defaults['timeout'])
    parser.add_option("--slowtimeout", type="int",
        help="kill errant slow tests after SLOWTIMEOUT seconds"
             " (default: $%s or %d)" % defaults['slowtimeout'])
    parser.add_option("--time", action="store_true",
        help="time how long each test takes")
    parser.add_option("--json", action="store_true",
        help="store test result data in 'report.json' file")
    parser.add_option("--tmpdir", type="string",
        help="run tests in the given temporary directory"
             " (implies --keep-tmpdir)")
    parser.add_option("-v", "--verbose", action="store_true",
        help="output verbose messages")
    parser.add_option("--xunit", type="string",
        help="record xunit results at specified path")
    parser.add_option("--view", type="string",
        help="external diff viewer")
    parser.add_option("--with-hg", type="string",
        metavar="HG",
        help="test using specified hg script rather than a "
             "temporary installation")
    parser.add_option("-3", "--py3k-warnings", action="store_true",
        help="enable Py3k warnings on Python 2.6+")
    parser.add_option('--extra-config-opt', action="append",
        help='set the given config opt in the test hgrc')
    parser.add_option('--random', action="store_true",
        help='run tests in random order')
    parser.add_option('--profile-runner', action='store_true',
        help='run statprof on run-tests')
    parser.add_option('--allow-slow-tests', action='store_true',
        help='allow extremely slow tests')
    parser.add_option('--showchannels', action='store_true',
        help='show scheduling channels')

    # Fold environment overrides back into the shared `defaults` dict so
    # both the parser defaults and later comparisons (e.g. in parseargs)
    # see the effective values.
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
278 278
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Returns (options, args).  May call parser.error() (which exits) on
    invalid combinations, and sets the module-level `verbose` flag.
    """
    (options, args) = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.with_hg:
        options.with_hg = os.path.expanduser(options.with_hg)
        if not (os.path.isfile(options.with_hg) and
                os.access(options.with_hg, os.X_OK)):
            parser.error('--with-hg must specify an executable hg script')
        if not os.path.basename(options.with_hg) == 'hg':
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
    if options.local:
        # --local is shorthand for --with-hg=<testdir>/../hg
        testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0])))
        hgbin = os.path.join(os.path.dirname(testdir), b'hg')
        if os.name != 'nt' and not os.access(hgbin, os.X_OK):
            parser.error('--local specified, but %r not found or not executable'
                         % hgbin)
        options.with_hg = hgbin

    # Any of the coverage-related flags implies coverage processing.
    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage
            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error("sorry, coverage options do not work when --local "
                     "is specified")

    if options.anycoverage and options.with_hg:
        parser.error("sorry, coverage options do not work when --with-hg "
                     "is specified")

    global verbose
    if options.verbose:
        # The empty string enables verbose mode (vlog checks `is False`)
        # without adding a prefix to log output.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = os.path.expanduser(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # Timeouts make no sense while a human may be at a debugger.
        if options.timeout != defaults['timeout']:
            sys.stderr.write(
                'warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n')
        options.timeout = 0
        options.slowtimeout = 0
    if options.py3k_warnings:
        if PYTHON3:
            parser.error(
                '--py3k-warnings can only be used on Python 2.6 and 2.7')
    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        # Channel display would fight with diff output on the console.
        options.nodiff = True

    return (options, args)
356 356
def rename(src, dst):
    """Move *src* to *dst*, overwriting any existing destination.

    Unlike os.rename() this tolerates a pre-existing destination (and
    works across platforms where rename-over is an error), at the cost
    of atomicity and opened-file friendliness.
    """
    # copy first, then delete the source -- deliberately non-atomic
    shutil.copy(src, dst)
    os.remove(src)
363 363
# difflib.unified_diff only accepts str lines on Python 3; diff_bytes
# adapts it to the bytes line lists used throughout this script.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools
    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
368 368
def getdiff(expected, output, ref, err):
    """Return (servefail, lines) for a unified diff of test output.

    `lines` is the byte-wise unified diff between `expected` and
    `output`; `servefail` tells whether the diff shows a server child
    process that failed to start.
    """
    marker = b'+ abort: child process failed to start'
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        # Normalize the '+++'/'---' header lines: forward slashes, and
        # no trailing blank before the newline.
        if line[:3] in (b'+++', b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        servefail = servefail or line.startswith(marker)

    return servefail, lines
383 383
# Global verbosity flag; parseargs() swaps in a (possibly empty) string
# prefix when --verbose is given, so the test is `is False`, not truthiness.
verbose = False

def vlog(*msg):
    """Forward *msg* to log(), but only when verbose mode is enabled."""
    if verbose is not False:
        return log(*msg)
    return None
391 391
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

def cdatasafe(data):
    """Return *data* made safe to embed inside an XML CDATA block.

    Illegal control bytes become '?', and every ']]>' sequence gains a
    space between the brackets so the CDATA section cannot be closed
    early (there is no way to escape ']]>' inside CDATA).
    """
    sanitized = CDATA_EVIL.sub(b'?', data)
    return sanitized.replace(b']]>', b'] ]>')
405 405
def log(*msg):
    """Print *msg* items to stdout, space separated, then flush.

    Output is serialized through the global iolock so parallel test
    threads do not interleave their messages.  When verbose mode is on,
    the verbose prefix string is printed first.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for item in msg:
            print(item, end=' ')
        print()
        sys.stdout.flush()
418 418
def terminate(proc):
    """Kill *proc*, tolerating interpreters without Popen.terminate().

    Python < 2.6 Popen objects lack terminate(); fall back to sending
    SIGTERM by pid.  A process that has already exited (OSError) is
    silently ignored -- this is best effort.
    """
    vlog('# Terminating process %d' % proc.pid)
    killer = getattr(proc, 'terminate', None)
    if killer is None:
        killer = lambda: os.kill(proc.pid, signal.SIGTERM)
    try:
        killer()
    except OSError:
        pass
426 426
def killdaemons(pidfile):
    # Kill every daemon process recorded in `pidfile`, then remove the
    # file; progress is reported through vlog only.
    return killmod.killdaemons(pidfile, tryhard=False, remove=True,
                               logfn=vlog)
430 430
class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(self, path, tmpdir, keeptmpdir=False,
                 debug=False,
                 timeout=defaults['timeout'],
                 startport=defaults['port'], extraconfigopts=None,
                 py3kwarnings=False, shell=None,
                 slowtimeout=defaults['slowtimeout']):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        py3kwarnings enables Py3k warnings.

        shell is the shell to execute tests in.
        """
        self.path = path
        self.bname = os.path.basename(path)
        self.name = _strpath(self.bname)
        self._testdir = os.path.dirname(path)
        # Failures are recorded next to the test as <test>.err.
        self.errpath = os.path.join(self._testdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._py3kwarnings = py3kwarnings
        self._shell = _bytespath(shell)

        # Per-run state, reset in setUp().
        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None

        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if debug:
            self._refout = None # to match "out is None"
        elif os.path.exists(self.refpath):
            f = open(self.refpath, 'rb')
            self._refout = f.read().splitlines(True)
            f.close()
        else:
            self._refout = []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        # The per-thread directory may already exist; it is shared by all
        # tests run on the same thread.
        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        self._testtmp = os.path.join(self._threadtmp,
                                     os.path.basename(self.path))
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except IgnoreTest as e:
                result.addIgnore(self, str(e))
                # As with skips, ignores also should be excluded from
                # the number of tests executed.
                result.testsRun -= 1
            except WarnTest as e:
                result.addWarn(self, str(e))
            except ReportedTest as e:
                # Already reported by the result object; nothing to add.
                pass
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Human-readable description of a child exit status.
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None: # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            raise WarnTest('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (ret != 0 or out != self._refout) and not self._skipped \
                and not self._debug:
                f = open(self.errpath, 'wb')
                for line in out:
                    f.write(line)
                f.close()

            # The result object handles diff calculation for us.
            if self._result.addOutputMismatch(self, ret, out, self._refout):
                # change was accepted, skip failing
                return

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' %
                (self._testtmp, self._threadtmp))
        else:
            shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        # Record failing output for later inspection/diffing.
        if (self._ret != 0 or self._out != self._refout) and not self._skipped \
            and not self._debug and self._out:
            f = open(self.errpath, 'wb')
            for line in self._out:
                f.write(line)
            f.close()

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # The three reserved ports become $HGPORT/$HGPORT1/$HGPORT2.
            (br':%d\b' % self._startport, b':$HGPORT'),
            (br':%d\b' % (self._startport + 1), b':$HGPORT1'),
            (br':%d\b' % (self._startport + 2), b':$HGPORT2'),
            (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$',
             br'\1 (glob)'),
            ]

        if os.name == 'nt':
            # Build a case- and slash-insensitive pattern for the test's
            # temp dir, since Windows paths vary in both.
            # NOTE(review): iterating a bytes object yields ints on
            # Python 3, so c.isalpha() would fail there -- presumably
            # this branch is only exercised on py2/Windows; confirm.
            r.append(
                (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or
                    c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c
                    for c in self._testtmp), b'$TESTTMP'))
        else:
            r.append((re.escape(self._testtmp), b'$TESTTMP'))

        return r

    def _getenv(self):
        """Obtain environment variables to use during test execution."""
        env = os.environ.copy()
        env['TESTTMP'] = self._testtmp
        env['HOME'] = self._testtmp
        env["HGPORT"] = str(self._startport)
        env["HGPORT1"] = str(self._startport + 1)
        env["HGPORT2"] = str(self._startport + 2)
        env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc')
        env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids')
        # Editor that succeeds without doing anything.
        env["HGEDITOR"] = ('"' + sys.executable + '"'
                           + ' -c "import sys; sys.exit(0)"')
        env["HGMERGE"] = "internal:merge"
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
                  'NO_PROXY').split():
            if k in env:
                del env[k]

        # unset env related to hooks
        # NOTE(review): deleting from `env` while iterating env.keys()
        # raises RuntimeError on Python 3 -- confirm this is py2-safe only.
        for k in env.keys():
            if k.startswith('HG_'):
                del env[k]

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        hgrc = open(path, 'wb')
        hgrc.write(b'[ui]\n')
        hgrc.write(b'slash = True\n')
        hgrc.write(b'interactive = False\n')
        hgrc.write(b'mergemarkers = detailed\n')
        hgrc.write(b'promptecho = True\n')
        hgrc.write(b'[defaults]\n')
        hgrc.write(b'backout = -d "0 0"\n')
        hgrc.write(b'commit = -d "0 0"\n')
        hgrc.write(b'shelve = --date "0 0"\n')
        hgrc.write(b'tag = -d "0 0"\n')
        hgrc.write(b'[devel]\n')
        hgrc.write(b'all-warnings = true\n')
        hgrc.write(b'[largefiles]\n')
        hgrc.write(b'usercache = %s\n' %
                   (os.path.join(self._testtmp, b'.cache/largefiles')))

        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            assert '=' in key, ('extra config opt %s must '
                                'have an = for assignment' % opt)
            hgrc.write(b'[%s]\n%s\n' % (section, key))
        hgrc.close()

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In --debug mode the child inherits our stdout/stderr.
            proc = subprocess.Popen(cmd, shell=True, cwd=self._testtmp,
                                    env=env)
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)
        def cleanup():
            # Kill the child and its daemons; if the child managed to
            # exit cleanly anyway, report a SIGTERM-style status.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        output = ''
        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports, paths, etc. so output is comparable.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace('\r\n', '\n')

        return ret, output.splitlines(True)
852 852
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as <test>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Run the .py test with the interpreter running this script,
        # optionally with -3 (Py3k warnings) on Python 2.
        py3kswitch = self._py3kwarnings and b' -3' or b''
        cmd = b'%s%s "%s"' % (PYTHON, py3kswitch, self.path)
        vlog("# Running", cmd)
        # Windows children emit \r\n; normalize to compare with .out files.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env,
                                  normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
871 871
# This script may want to drop globs from lines matching these patterns on
# Windows, but check-code.py wants a glob on these lines unconditionally. Don't
# warn if that is the case for anything matching these lines.
checkcodeglobpats = [
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$')
]
880 880
881 881 bchr = chr
882 882 if PYTHON3:
883 883 bchr = lambda x: bytes([x])
884 884
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefixes written by tests/hghave when a test is skipped or a feature
    # check itself failed; consumed by parsehghaveoutput() below.
    SKIPPED_PREFIX = 'skipped: '
    FAILED_PREFIX = 'hghave check failed: '
    # Matches any byte that must be escaped before being embedded in
    # expected output (control characters and non-ASCII bytes).
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Machinery for _stringescape(): ESCAPESUB locates bytes to rewrite and
    # ESCAPEMAP supplies their \xNN replacements (with \\ and \r special-cased).
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = dict((bchr(i), br'\x%02x' % i) for i in range(256))
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    @property
    def refpath(self):
        # A .t file carries its own expected output, so it is its own
        # reference file.
        return os.path.join(self._testdir, self.bname)
899 899
900 900 def _run(self, env):
901 901 f = open(self.path, 'rb')
902 902 lines = f.readlines()
903 903 f.close()
904 904
905 905 salt, script, after, expected = self._parsetest(lines)
906 906
907 907 # Write out the generated script.
908 908 fname = b'%s.sh' % self._testtmp
909 909 f = open(fname, 'wb')
910 910 for l in script:
911 911 f.write(l)
912 912 f.close()
913 913
914 914 cmd = b'%s "%s"' % (self._shell, fname)
915 915 vlog("# Running", cmd)
916 916
917 917 exitcode, output = self._runcommand(cmd, env)
918 918
919 919 if self._aborted:
920 920 raise KeyboardInterrupt()
921 921
922 922 # Do not merge output if skipped. Return hghave message instead.
923 923 # Similarly, with --debug, output is None.
924 924 if exitcode == self.SKIPPED_STATUS or output is None:
925 925 return exitcode, output
926 926
927 927 return self._processoutput(exitcode, output, salt, after, expected)
928 928
929 929 def _hghave(self, reqs):
930 930 # TODO do something smarter when all other uses of hghave are gone.
931 931 runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__)))
932 932 tdir = runtestdir.replace(b'\\', b'/')
933 933 proc = Popen4(b'%s -c "%s/hghave %s"' %
934 934 (self._shell, tdir, b' '.join(reqs)),
935 935 self._testtmp, 0, self._getenv())
936 936 stdout, stderr = proc.communicate()
937 937 ret = proc.wait()
938 938 if wifexited(ret):
939 939 ret = os.WEXITSTATUS(ret)
940 940 if ret == 2:
941 941 print(stdout)
942 942 sys.exit(1)
943 943
944 944 if ret != 0:
945 945 return False, stdout
946 946
947 947 if 'slow' in reqs:
948 948 self._timeout = self._slowtimeout
949 949 return True, None
950 950
    def _parsetest(self, lines):
        """Translate .t source ``lines`` into ``(salt, script, after, expected)``.

        ``script`` is the list of shell-script chunks to execute, ``after``
        maps source line numbers to inactive text to re-merge, ``expected``
        maps source line numbers to the expected output lines, and ``salt``
        is the unique marker used to synchronize output with source lines.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()
        def addsalt(line, inpython):
            # Emit a sync marker carrying the source line number; shell
            # markers also capture $? so exit codes can be reported.
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if os.getenv('MSYSTEM'):
            # MSYS/MinGW shells: make pwd print Windows-style paths.
            script.append(b'alias pwd="pwd -W"\n')

        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append('  !!! invalid #require\n')
                haveresult, message = self._hghave(lsplit[1:])
                if not haveresult:
                    # Requirement missing: replace the entire script with a
                    # skip (exit 80 is the skipped status).
                    script = [b'echo "%s"\nexit 80\n' % message]
                    break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append('  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append('  !!! nested #if\n')
                skipping = not self._hghave(lsplit[1:])[0]
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append('  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '): # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False) # Make sure we report the exit code.
                    script.append(b'%s -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '): # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '): # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                cmd = l[4:].split()
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Make a failing cd abort the test instead of running
                    # the remaining commands in the wrong directory.
                    l = b'  $ cd %s || exit 1\n' % cmd[1]
                script.append(l[4:])
            elif l.startswith(b'  > '): # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '): # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append('  !!! missing #endif\n')
        addsalt(n + 1, False)

        return salt, script, after, expected
1065 1065
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge raw script ``output`` back into a unified test body.

        Returns ``(exitcode, postout)``. ``exitcode`` is set to False
        ("warned") when the only differences were warn-only ones (glob
        annotations); ``postout`` is the merged output suitable for
        diffing against the reference file.
        """
        # Merge the script output back into a unified test.
        warnonly = 1 # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = 3

        pos = -1
        postout = []
        for l in output:
            lout, lcmd = l, None
            if salt in l:
                # Marker line: text before the salt is real output, text
                # after is "<source line> <exit code>".
                lout, lcmd = l.split(salt, 1)

            while lout:
                if not lout.endswith(b'\n'):
                    lout += b' (no-eol)\n'

                # Find the expected output at the current position.
                el = None
                if expected.get(pos, None):
                    el = expected[pos].pop(0)

                r = TTest.linematch(el, lout)
                if isinstance(r, str):
                    if r == '+glob':
                        lout = el[:-1] + ' (glob)\n'
                        r = '' # Warn only this line.
                    elif r == '-glob':
                        lout = ''.join(el.rsplit(' (glob)', 1))
                        r = '' # Warn only this line.
                    elif r == "retry":
                        # Optional "(?)" line that didn't match: keep it and
                        # retry the same output line against the next one.
                        postout.append(b'  ' + el)
                        continue
                    else:
                        log('\ninfo, unknown linematch result: %r\n' % r)
                        r = False
                if r:
                    postout.append(b'  ' + el)
                else:
                    if self.NEEDESCAPE(lout):
                        lout = TTest._stringescape(b'%s (esc)\n' %
                                                   lout.rstrip(b'\n'))
                    postout.append(b'  ' + lout) # Let diff deal with it.
                    if r != '': # If line failed.
                        warnonly = 3 # for sure not
                    elif warnonly == 1: # Is "not yet" and line is warn only.
                        warnonly = 2 # Yes do warn.
                break

            # clean up any optional leftovers
            while expected.get(pos, None):
                el = expected[pos].pop(0)
                if not el.endswith(b" (?)\n"):
                    expected[pos].insert(0, el)
                    break
                postout.append(b'  ' + el)

            if lcmd:
                # Add on last return code.
                ret = int(lcmd.split()[1])
                if ret != 0:
                    postout.append(b'  [%d]\n' % ret)
                if pos in after:
                    # Merge in non-active test bits.
                    postout += after.pop(pos)
                pos = int(lcmd.split()[0])

        if pos in after:
            postout += after.pop(pos)

        if warnonly == 2:
            exitcode = False # Set exitcode to warned.

        return exitcode, postout
1140 1140
1141 1141 @staticmethod
1142 1142 def rematch(el, l):
1143 1143 try:
1144 1144 # use \Z to ensure that the regex matches to the end of the string
1145 1145 if os.name == 'nt':
1146 1146 return re.match(el + br'\r?\n\Z', l)
1147 1147 return re.match(el + br'\n\Z', l)
1148 1148 except re.error:
1149 1149 # el is an invalid regex
1150 1150 return False
1151 1151
    @staticmethod
    def globmatch(el, l):
        """Match glob-pattern line ``el`` against output line ``l``.

        Returns a truthy match, False, or b'-glob' when the line matched
        literally so the "(glob)" marker was unnecessary.
        """
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        # Translate the glob into a regular expression byte by byte, then
        # delegate the anchored match to rematch().
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i:i + 1]
            i += 1
            if c == b'\\' and i < n and el[i:i + 1] in b'*?\\/':
                # Escaped special character: keep the backslash pair verbatim.
                res += el[i - 1:i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                # On Windows, / in a pattern also matches \ in output.
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)
1181 1181
1182 1182 @staticmethod
1183 1183 def linematch(el, l):
1184 1184 retry = False
1185 1185 if el == l: # perfect match (fast)
1186 1186 return True
1187 1187 if el:
1188 1188 if el.endswith(b" (?)\n"):
1189 1189 retry = "retry"
1190 1190 el = el[:-5] + "\n"
1191 1191 if el.endswith(b" (esc)\n"):
1192 1192 if PYTHON3:
1193 1193 el = el[:-7].decode('unicode_escape') + '\n'
1194 1194 el = el.encode('utf-8')
1195 1195 else:
1196 1196 el = el[:-7].decode('string-escape') + '\n'
1197 1197 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
1198 1198 return True
1199 1199 if el.endswith(b" (re)\n"):
1200 1200 return TTest.rematch(el[:-6], l) or retry
1201 1201 if el.endswith(b" (glob)\n"):
1202 1202 # ignore '(glob)' added to l by 'replacements'
1203 1203 if l.endswith(b" (glob)\n"):
1204 1204 l = l[:-8] + b"\n"
1205 1205 return TTest.globmatch(el[:-8], l)
1206 1206 if os.altsep and l.replace(b'\\', b'/') == el:
1207 1207 return b'+glob'
1208 1208 return retry
1209 1209
1210 1210 @staticmethod
1211 1211 def parsehghaveoutput(lines):
1212 1212 '''Parse hghave log lines.
1213 1213
1214 1214 Return tuple of lists (missing, failed):
1215 1215 * the missing/unknown features
1216 1216 * the features for which existence check failed'''
1217 1217 missing = []
1218 1218 failed = []
1219 1219 for line in lines:
1220 1220 if line.startswith(TTest.SKIPPED_PREFIX):
1221 1221 line = line.splitlines()[0]
1222 1222 missing.append(line[len(TTest.SKIPPED_PREFIX):])
1223 1223 elif line.startswith(TTest.FAILED_PREFIX):
1224 1224 line = line.splitlines()[0]
1225 1225 failed.append(line[len(TTest.FAILED_PREFIX):])
1226 1226
1227 1227 return missing, failed
1228 1228
    @staticmethod
    def _escapef(m):
        # re.sub callback: map the matched byte to its escaped form.
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        # Rewrite control and non-ASCII bytes in s via ESCAPEMAP so the
        # result is printable in expected output.
        return TTest.ESCAPESUB(TTest._escapef, s)
1236 1236
1237 1237 iolock = threading.RLock()
1238 1238
# Control-flow exceptions raised by Test implementations to signal a
# non-pass/fail outcome to the result machinery.
class SkipTest(Exception):
    """Raised to indicate that a test is to be skipped."""

class IgnoreTest(Exception):
    """Raised to indicate that a test is to be ignored."""

class WarnTest(Exception):
    """Raised to indicate that a test warned."""

class ReportedTest(Exception):
    """Raised to indicate that a test already reported."""
1250 1250
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""
    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        # options: parsed runner command-line options; consulted for
        # --first, --nodiff, --view, --interactive, etc.
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # We have a custom "warned" result that isn't present in any Python
        # unittest implementation. It is very similar to failed. It may make
        # sense to map it into fail some day.
        self.warned = []

        # Per-test timing tuples; see stopTest() for the tuple layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}
1279 1279
1280 1280 def addFailure(self, test, reason):
1281 1281 self.failures.append((test, reason))
1282 1282
1283 1283 if self._options.first:
1284 1284 self.stop()
1285 1285 else:
1286 1286 with iolock:
1287 1287 if reason == "timed out":
1288 1288 self.stream.write('t')
1289 1289 else:
1290 1290 if not self._options.nodiff:
1291 1291 self.stream.write('\nERROR: %s output changed\n' % test)
1292 1292 self.stream.write('!')
1293 1293
1294 1294 self.stream.flush()
1295 1295
    def addSuccess(self, test):
        # Record the pass and remember the test for the xunit report.
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)
1300 1300
    def addError(self, test, err):
        # Record the error; with --first, stop the run immediately.
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()
1305 1305
    # Polyfill.
    def addSkip(self, test, reason):
        # Record and report a skipped test (verbose: full line; else 's').
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()
1315 1315
    def addIgnore(self, test, reason):
        # Record an ignored test. Filter-driven ignores ('not retesting',
        # keyword mismatch) stay silent but still count toward testsRun;
        # other reasons print 'i'.
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()
1327 1327
    def addWarn(self, test, reason):
        # Record a warned (non-fatal) outcome. Note: unlike addFailure,
        # the progress output is still written even when --first stops
        # the run.
        self.warned.append((test, reason))

        if self._options.first:
            self.stop()

        with iolock:
            if self.showAll:
                self.stream.writeln('warned %s' % reason)
            else:
                self.stream.write('~')
            self.stream.flush()
1340 1340
    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test.

        Returns True when the user interactively accepted the new output
        (the reference file is replaced in place).
        """
        if self.shouldStop:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        # NOTE(review): 'failed' is never set to True anywhere below, so
        # the 'not failed' term in the final condition is always true —
        # looks vestigial; confirm before relying on it.
        failed = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # --view: show the diff in an external program instead.
                v = self._options.view
                if PYTHON3:
                    v = _bytespath(v)
                os.system(b"%s %s %s" %
                          (v, test.refpath, test.errpath))
            else:
                servefail, lines = getdiff(expected, got,
                                           test.refpath, test.errpath)
                if servefail:
                    self.addFailure(
                        test,
                        'server failed to start (HGPORT=%s)' % test._startport)
                    raise ReportedTest('server failed to start')
                else:
                    self.stream.write('\n')
                    for line in lines:
                        if PYTHON3:
                            # Diff lines are bytes; write via the
                            # underlying binary buffer on Python 3.
                            self.stream.flush()
                            self.stream.buffer.write(line)
                            self.stream.buffer.flush()
                        else:
                            self.stream.write(line)
                        self.stream.flush()

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                self.stream.write('Accept this change? [n] ')
                answer = sys.stdin.readline().strip()
                if answer.lower() in ('y', 'yes'):
                    if test.name.endswith('.t'):
                        rename(test.errpath, test.path)
                    else:
                        rename(test.errpath, '%s.out' % test.path)
                    accepted = True
            if not accepted and not failed:
                self.faildata[test.name] = b''.join(lines)

        return accepted
1395 1395
    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows.
        test.started = os.times()
        if self._firststarttime is None: # thread racy but irrelevant
            # Wall-clock origin of the whole run (os.times()[4]).
            self._firststarttime = test.started[4]
1406 1406
    def stopTest(self, test, interrupted=False):
        """Record timing for *test*; optionally report an interruption."""
        super(TestResult, self).stopTest(test)

        test.stopped = os.times()

        starttime = test.started
        endtime = test.stopped
        origin = self._firststarttime
        # Timing tuple layout consumed by the --time report and the
        # xunit/json writers.
        self.times.append((test.name,
                           endtime[2] - starttime[2], # user space CPU time
                           endtime[3] - starttime[3], # sys space CPU time
                           endtime[4] - starttime[4], # real time
                           starttime[4] - origin, # start date in run context
                           endtime[4] - origin, # end date in run context
                           ))

        if interrupted:
            with iolock:
                self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
                    test.name, self.times[-1][3]))
1427 1427
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(self, testdir, jobs=1, whitelist=None, blacklist=None,
                 retest=False, keywords=None, loop=False, runs_per_test=1,
                 loadtest=None, showchannels=False,
                 *args, **kwargs):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.

        runs_per_test is how many times each selected test is scheduled.

        loadtest is a callable (name, count) -> Test used to obtain a fresh
        test instance when a test asks to be reloaded between runs.

        showchannels enables the periodic per-job progress display.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels
1471 1471
    def run(self, result):
        """Filter the scheduled tests, then run them on worker threads."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]  # single-element list so the get() closure can mutate
        for test in self._tests:
            def get():
                # Return the test to run, reloading it if requested so each
                # run gets a fresh Test instance.
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test.bname, num_tests[0])
                return test
            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass all of the filters below.
            if not (self._whitelist and test.name in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    # Keyword filter matches against the test's content as
                    # well as its name.
                    f = open(test.path, 'rb')
                    t = f.read().lower() + test.bname.lower()
                    f.close()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One status slot per worker for the --showchannels display.
        channels = [""] * self._jobs

        def job(test, result):
            # Worker body: claim a free channel, run the test, report
            # completion (or failure to run) through the done queue.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except: # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            channels[channel] = ''

        def stat():
            # --showchannels: periodically print one progress column per job.
            count = 0
            while channels:
                d = '\n%03s  ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + '  ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Reap one finished job when the pool is full (or nothing
                # remains to start); otherwise fall through and start one.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        # --loop: re-queue the test (possibly reloaded) so
                        # the suite never drains.
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(
                                self._loadtest(test.name, num_tests[0]))
                        else:
                            tests.append(test)
                    t = threading.Thread(target=job, name=test.name,
                                         args=(test, result))
                    t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Rebinding the closed-over name empties stat()'s loop condition,
        # letting the stat thread (if any) exit.
        channels = []

        return result
1603 1603
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        # runner: the owning TestRunner; provides options and shared state.
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner
1611 1611
    def run(self, test):
        """Run *test*, print the summary, and write xunit/json reports."""
        result = TestResult(self._runner.options, self.stream,
                            self.descriptions, self.verbosity)

        test(result)

        failed = len(result.failures)
        warned = len(result.warned)
        skipped = len(result.skipped)
        ignored = len(result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in result.skipped:
                    self.stream.writeln('Skipped %s: %s' % (test.name, msg))
            for test, msg in result.warned:
                self.stream.writeln('Warned %s: %s' % (test.name, msg))
            for test, msg in result.failures:
                self.stream.writeln('Failed %s: %s' % (test.name, msg))
            for test, msg in result.errors:
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                # --xunit: write an xunit-style XML report.
                xuf = open(self._runner.options.xunit, 'wb')
                try:
                    timesd = dict((t[0], t[3]) for t in result.times)
                    doc = minidom.Document()
                    s = doc.createElement('testsuite')
                    s.setAttribute('name', 'run-tests')
                    s.setAttribute('tests', str(result.testsRun))
                    s.setAttribute('errors', "0") # TODO
                    s.setAttribute('failures', str(failed))
                    s.setAttribute('skipped', str(skipped + ignored))
                    doc.appendChild(s)
                    for tc in result.successes:
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc.name)
                        t.setAttribute('time', '%.3f' % timesd[tc.name])
                        s.appendChild(t)
                    for tc, err in sorted(result.faildata.items()):
                        t = doc.createElement('testcase')
                        t.setAttribute('name', tc)
                        t.setAttribute('time', '%.3f' % timesd[tc])
                        # createCDATASection expects a unicode or it will
                        # convert using default conversion rules, which will
                        # fail if string isn't ASCII.
                        err = cdatasafe(err).decode('utf-8', 'replace')
                        cd = doc.createCDATASection(err)
                        t.appendChild(cd)
                        s.appendChild(t)
                    xuf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))
                finally:
                    xuf.close()

            if self._runner.options.json:
                if json is None:
                    raise ImportError("json module not installed")
                jsonpath = os.path.join(self._runner._testdir, 'report.json')
                fp = open(jsonpath, 'w')
                try:
                    timesd = {}
                    for tdata in result.times:
                        test = tdata[0]
                        timesd[test] = tdata[1:]

                    outcome = {}
                    groups = [('success', ((tc, None)
                               for tc in result.successes)),
                              ('failure', result.failures),
                              ('skip', result.skipped)]
                    for res, testcases in groups:
                        for tc, __ in testcases:
                            # Indices into timesd follow the tuple layout
                            # built in TestResult.stopTest.
                            tres = {'result': res,
                                    'time': ('%0.3f' % timesd[tc.name][2]),
                                    'cuser': ('%0.3f' % timesd[tc.name][0]),
                                    'csys': ('%0.3f' % timesd[tc.name][1]),
                                    'start': ('%0.3f' % timesd[tc.name][3]),
                                    'end': ('%0.3f' % timesd[tc.name][4])}
                            outcome[tc.name] = tres
                    jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                    fp.writelines(("testreport =", jsonout))
                finally:
                    fp.close()

            self._runner._checkhglib('Tested')

            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d warned, %d failed.'
                % (result.testsRun,
                   skipped + ignored, warned, failed))
            if failed:
                self.stream.writeln('python hash seed: %s' %
                                    os.environ['PYTHONHASHSEED'])
            if self._runner.options.time:
                self.printtimes(result.times)

        return result
1711 1711
1712 1712 def printtimes(self, times):
1713 1713 # iolock held by run
1714 1714 self.stream.writeln('# Producing time report')
1715 1715 times.sort(key=lambda t: (t[3]))
1716 1716 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
1717 1717 self.stream.writeln('%-7s %-7s %-7s %-7s %-7s %s' %
1718 1718 ('start', 'end', 'cuser', 'csys', 'real', 'Test'))
1719 1719 for tdata in times:
1720 1720 test = tdata[0]
1721 1721 cuser, csys, real, start, end = tdata[1:6]
1722 1722 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
1723 1723
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests; checked up front so a missing tool
    # fails fast instead of breaking individual tests.
    REQUIREDTOOLS = [
        os.path.basename(_bytespath(sys.executable)),
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]
1746 1746
    def __init__(self):
        self.options = None          # parsed command-line options (set in run)
        self._hgroot = None          # root of the Mercurial checkout under test
        self._testdir = None         # directory tests execute from
        self._hgtmp = None           # per-run temporary directory
        self._installdir = None     # temp install location, if installing
        self._bindir = None          # directory of the hg executable used
        self._tmpbinddir = None      # temp bin dir with wrapper scripts
        self._pythondir = None       # directory of the Mercurial python package
        self._coveragefile = None    # coverage data file, with -c/-C/-s
        self._createdfiles = []      # files to clean up at exit
        self._hgpath = None          # cached path of the hg script
        self._portoffset = 0         # next offset when allocating HGPORTs
        self._ports = {}             # count -> allocated base port
1761 1761
    def run(self, args, parser=None):
        """Run the test suite.

        ``args`` are the positional command-line arguments (test paths);
        ``parser`` optionally overrides the default option parser.
        Returns the exit status of the run.
        """
        # Ensure created files/dirs get sane permissions regardless of the
        # caller's umask; restored in the finally block.
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options, args = parseargs(args, parser)
            # positional arguments are paths to test files to run, so
            # we make sure they're all bytestrings
            args = [_bytespath(a) for a in args]
            self.options = options

            self._checktools()
            tests = self.findtests(args)
            if options.profile_runner:
                # Profile the runner itself (not the tests) with statprof.
                import statprof
                statprof.start()
            result = self._run(tests)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
1786 1786
    def _run(self, tests):
        """Order ``tests``, build the run-time environment, and execute them.

        Sets up HGTMP, locates or installs the hg to test (honoring
        --with-hg), and exports TESTDIR/RUNTESTDIR/BINDIR/PATH/PYTHONPATH
        and the HGTEST_* variables that individual tests rely on, before
        delegating to _runtests(). Returns the harness exit code.
        """
        if self.options.random:
            random.shuffle(tests)
        else:
            # keywords for slow tests
            slow = {b'svn': 10,
                    b'cvs': 10,
                    b'hghave': 10,
                    b'largefiles-update': 10,
                    b'run-tests': 10,
                    b'corruption': 10,
                    b'race': 10,
                    b'i18n': 10,
                    b'check': 100,
                    b'gendoc': 100,
                    b'contrib-perf': 200,
                    }
            def sortkey(f):
                # run largest tests first, as they tend to take the longest
                try:
                    val = -os.stat(f).st_size
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                    return -1e9 # file does not exist, tell early
                for kw, mul in slow.items():
                    if kw in f:
                        val *= mul
                return val
            tests.sort(key=sortkey)

        # getcwdb exists only on Python 3; fall back to getcwd on Python 2
        self._testdir = osenvironb[b'TESTDIR'] = getattr(
            os, 'getcwdb', os.getcwd)()

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _bytespath(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            # Automatically removing tmpdir sounds convenient, but could
            # really annoy anyone in the habit of using "--tmpdir=/tmp"
            # or "--tmpdir=$HOME".
            #vlog("# Removing temp dir", tmpdir)
            #shutil.rmtree(tmpdir)
            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = (
            os.path.realpath(tmpdir))

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            # If --with-hg is not specified, we have bytes already,
            # but if it was specified in python3 we get a str, so we
            # have to encode it back into a bytes.
            if PYTHON3:
                if not isinstance(whg, bytes):
                    whg = _bytespath(whg)
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            # This looks redundant with how Python initializes sys.path from
            # the location of the script being executed. Needed because the
            # "hg" specified by --with-hg is not the only Python script
            # executed in the test suite that needs to import 'mercurial'
            # ... which means it's not really redundant at all.
            self._pythondir = self._bindir
        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = osenvironb[b"BINDIR"] = \
                os.path.join(self._installdir, b"bin")
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        fileb = _bytespath(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _bytespath(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        vlog("# Using TESTDIR", self._testdir)
        vlog("# Using RUNTESTDIR", osenvironb[b'RUNTESTDIR'])
        vlog("# Using HGTMP", self._hgtmp)
        vlog("# Using PATH", os.environ["PATH"])
        vlog("# Using", IMPL_PATH, osenvironb[IMPL_PATH])

        try:
            return self._runtests(tests) or 0
        finally:
            # brief pause lets child processes release files before cleanup
            time.sleep(.1)
            self._cleanup()
1937 1937
1938 1938 def findtests(self, args):
1939 1939 """Finds possible test files from arguments.
1940 1940
1941 1941 If you wish to inject custom tests into the test harness, this would
1942 1942 be a good function to monkeypatch or override in a derived class.
1943 1943 """
1944 1944 if not args:
1945 1945 if self.options.changed:
1946 1946 proc = Popen4('hg st --rev "%s" -man0 .' %
1947 1947 self.options.changed, None, 0)
1948 1948 stdout, stderr = proc.communicate()
1949 1949 args = stdout.strip(b'\0').split(b'\0')
1950 1950 else:
1951 1951 args = os.listdir(b'.')
1952 1952
1953 1953 return [t for t in args
1954 1954 if os.path.basename(t).startswith(b'test-')
1955 1955 and (t.endswith(b'.py') or t.endswith(b'.t'))]
1956 1956
    def _runtests(self, tests):
        """Install hg if needed, build the TestSuite, and run it.

        Returns 1 if any test failed, 80 if the run only produced
        warnings, and None on full success (the caller maps None to 0).
        """
        try:
            if self._installdir:
                self._installhg()
                self._checkhglib("Testing")
            else:
                self._usecorrectpython()

            if self.options.restart:
                # drop tests up to the first one that left a .err file
                # behind from the previous run; if none did, run everything
                orig = list(tests)
                while tests:
                    if os.path.exists(tests[0] + ".err"):
                        break
                    tests.pop(0)
                if not tests:
                    print("running all tests")
                    tests = orig

            tests = [self._gettest(t, i) for i, t in enumerate(tests)]

            failed = False
            warned = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(self._testdir,
                              jobs=self.options.jobs,
                              whitelist=self.options.whitelisted,
                              blacklist=self.options.blacklist,
                              retest=self.options.retest,
                              keywords=kws,
                              loop=self.options.loop,
                              runs_per_test=self.options.runs_per_test,
                              showchannels=self.options.showchannels,
                              tests=tests, loadtest=self._gettest)
            verbosity = 1
            if self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)
            result = runner.run(suite)

            if result.failures:
                failed = True
            if result.warned:
                warned = True

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            # treat an interrupt as a failed run
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
        if warned:
            return 80
2014 2014
2015 2015 def _getport(self, count):
2016 2016 port = self._ports.get(count) # do we have a cached entry?
2017 2017 if port is None:
2018 port = self.options.port + self._portoffset
2019 2018 portneeded = 3
2020 2019 # above 100 tries we just give up and let test reports failure
2021 2020 for tries in xrange(100):
2022 2021 allfree = True
2022 port = self.options.port + self._portoffset
2023 2023 for idx in xrange(portneeded):
2024 2024 if not checkportisavailable(port + idx):
2025 2025 allfree = False
2026 2026 break
2027 2027 self._portoffset += portneeded
2028 2028 if allfree:
2029 2029 break
2030 2030 self._ports[count] = port
2031 2031 return port
2032 2032
2033 2033 def _gettest(self, test, count):
2034 2034 """Obtain a Test by looking at its filename.
2035 2035
2036 2036 Returns a Test instance. The Test may not be runnable if it doesn't
2037 2037 map to a known type.
2038 2038 """
2039 2039 lctest = test.lower()
2040 2040 testcls = Test
2041 2041
2042 2042 for ext, cls in self.TESTTYPES:
2043 2043 if lctest.endswith(ext):
2044 2044 testcls = cls
2045 2045 break
2046 2046
2047 2047 refpath = os.path.join(self._testdir, test)
2048 2048 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
2049 2049
2050 2050 t = testcls(refpath, tmpdir,
2051 2051 keeptmpdir=self.options.keep_tmpdir,
2052 2052 debug=self.options.debug,
2053 2053 timeout=self.options.timeout,
2054 2054 startport=self._getport(count),
2055 2055 extraconfigopts=self.options.extra_config_opt,
2056 2056 py3kwarnings=self.options.py3k_warnings,
2057 2057 shell=self.options.shell)
2058 2058 t.should_reload = True
2059 2059 return t
2060 2060
2061 2061 def _cleanup(self):
2062 2062 """Clean up state from this test invocation."""
2063 2063
2064 2064 if self.options.keep_tmpdir:
2065 2065 return
2066 2066
2067 2067 vlog("# Cleaning up HGTMP", self._hgtmp)
2068 2068 shutil.rmtree(self._hgtmp, True)
2069 2069 for f in self._createdfiles:
2070 2070 try:
2071 2071 os.remove(f)
2072 2072 except OSError:
2073 2073 pass
2074 2074
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests.

        Where symlinks are supported, a "python"/"python.exe" symlink to
        the interpreter running this harness is placed in the temporary
        bin directory; otherwise PATH is rearranged so the interpreter's
        directory is searched first.
        """
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'
        if getattr(os, 'symlink', None):
            vlog("# Making python executable in test path a symlink to '%s'" %
                 sys.executable)
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # readlink raises OSError(ENOENT) when the link is absent;
                # any other error is real and re-raised below
                if os.readlink(mypython) == sys.executable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sys.executable:
                try:
                    os.symlink(sys.executable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            exedir, exename = os.path.split(sys.executable)
            vlog("# Modifying search path to find %s as %s in '%s'" %
                 (exename, pyexename, exedir))
            path = os.environ['PATH'].split(os.pathsep)
            # remove any existing occurrences so exedir ends up first
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
2108 2108
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.

        Runs ``setup.py`` from the hg root into the temporary install
        prefix, optionally patching the installed script for -3 warnings,
        fixing hg.bat on Windows, and wiring up coverage collection.
        Exits the process on install failure after dumping install.err.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(b"tests", b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""
        py3 = ''

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sys.executable
        if PYTHON3:
            py3 = b'--c2to3'
            compiler = _bytespath(compiler)
            script = _bytespath(script)
            exe = _bytespath(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all'
               b' build %(compiler)s --build-base="%(base)s"'
               b' install --force --prefix="%(prefix)s"'
               b' --install-lib="%(libdir)s"'
               b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
               % {b'exe': exe, b'py3': py3, b'pure': pure,
                  b'compiler': compiler,
                  b'base': os.path.join(self._hgtmp, b"build"),
                  b'prefix': self._installdir, b'libdir': self._pythondir,
                  b'bindir': self._bindir,
                  b'nohome': nohome, b'logfile': installerrs})

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd)
        if os.system(cmd) == 0:
            # success: drop the (empty) error log unless --verbose kept it
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # install failed: replay the captured output and bail out
            f = open(installerrs, 'rb')
            for line in f:
                if PYTHON3:
                    sys.stdout.buffer.write(line)
                else:
                    sys.stdout.write(line)
            f.close()
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        if self.options.py3k_warnings and not self.options.anycoverage:
            vlog("# Updating hg command to enable Py3k Warnings switch")
            f = open(os.path.join(self._bindir, 'hg'), 'rb')
            lines = [line.rstrip() for line in f]
            lines[0] += ' -3'
            f.close()
            f = open(os.path.join(self._bindir, 'hg'), 'wb')
            for line in lines:
                f.write(line + '\n')
            f.close()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            f = open(hgbat, 'rb')
            data = f.read()
            f.close()
            if b'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(b'"%~dp0..\python" "%~dp0hg" %*',
                                    b'"%~dp0python" "%~dp0hg" %*')
                f = open(hgbat, 'wb')
                f.write(data)
                f.close()
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            custom = os.path.join(self._testdir, 'sitecustomize.py')
            target = os.path.join(self._pythondir, 'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, '.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            os.environ['COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, '..', 'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            os.environ['COVERAGE_DIR'] = covdir
2229 2229
2230 2230 def _checkhglib(self, verb):
2231 2231 """Ensure that the 'mercurial' package imported by python is
2232 2232 the one we expect it to be. If not, print a warning to stderr."""
2233 2233 if ((self._bindir == self._pythondir) and
2234 2234 (self._bindir != self._tmpbindir)):
2235 2235 # The pythondir has been inferred from --with-hg flag.
2236 2236 # We cannot expect anything sensible here.
2237 2237 return
2238 2238 expecthg = os.path.join(self._pythondir, b'mercurial')
2239 2239 actualhg = self._gethgpath()
2240 2240 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
2241 2241 sys.stderr.write('warning: %s with unexpected mercurial lib: %s\n'
2242 2242 ' (expected %s)\n'
2243 2243 % (verb, actualhg, expecthg))
2244 2244 def _gethgpath(self):
2245 2245 """Return the path to the mercurial package that is actually found by
2246 2246 the current Python interpreter."""
2247 2247 if self._hgpath is not None:
2248 2248 return self._hgpath
2249 2249
2250 2250 cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
2251 2251 cmd = cmd % PYTHON
2252 2252 if PYTHON3:
2253 2253 cmd = _strpath(cmd)
2254 2254 pipe = os.popen(cmd)
2255 2255 try:
2256 2256 self._hgpath = _bytespath(pipe.read().strip())
2257 2257 finally:
2258 2258 pipe.close()
2259 2259
2260 2260 return self._hgpath
2261 2261
    def _outputcoverage(self):
        """Produce code coverage output.

        Combines the per-process coverage data collected under the install
        area and writes a console report, plus optional HTML and annotated
        source output depending on --htmlcov / --annotate.
        """
        from coverage import coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(self._installdir, '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', self._pythondir]

        cov.combine()

        # exclude the test scripts themselves and installed executables
        omit = [os.path.join(x, '*') for x in [self._bindir, self._testdir]]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(self._testdir, 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(self._testdir, 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
2289 2289
2290 2290 def _findprogram(self, program):
2291 2291 """Search PATH for a executable program"""
2292 2292 dpb = _bytespath(os.defpath)
2293 2293 sepb = _bytespath(os.pathsep)
2294 2294 for p in osenvironb.get(b'PATH', dpb).split(sepb):
2295 2295 name = os.path.join(p, program)
2296 2296 if os.name == 'nt' or os.access(name, os.X_OK):
2297 2297 return name
2298 2298 return None
2299 2299
2300 2300 def _checktools(self):
2301 2301 """Ensure tools required to run tests are present."""
2302 2302 for p in self.REQUIREDTOOLS:
2303 2303 if os.name == 'nt' and not p.endswith('.exe'):
2304 2304 p += '.exe'
2305 2305 found = self._findprogram(p)
2306 2306 if found:
2307 2307 vlog("# Found prerequisite", p, "at", found)
2308 2308 else:
2309 2309 print("WARNING: Did not find prerequisite tool: %s " % p)
2310 2310
if __name__ == '__main__':
    runner = TestRunner()

    try:
        # On Windows, switch the standard streams to binary mode so test
        # output is not mangled by CRLF translation.
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # msvcrt exists only on Windows; elsewhere the streams are fine
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,675 +1,689
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ unset HGTEST_JOBS
6 6 $ unset HGTEST_TIMEOUT
7 7 $ unset HGTEST_PORT
8 8 $ unset HGTEST_SHELL
9 9
10 10 Smoke test with install
11 11 ============
12 12
13 13 $ run-tests.py $HGTEST_RUN_TESTS_PURE -l
14 14
15 15 # Ran 0 tests, 0 skipped, 0 warned, 0 failed.
16 16
17 17 Define a helper to avoid the install step
18 18 =============
19 19 $ rt()
20 20 > {
21 21 > run-tests.py --with-hg=`which hg` "$@"
22 22 > }
23 23
 24 24 a successful test
25 25 =======================
26 26
27 27 $ cat > test-success.t << EOF
28 28 > $ echo babar
29 29 > babar
30 30 > $ echo xyzzy
31 31 > never happens (?)
32 32 > xyzzy
33 33 > nor this (?)
34 34 > EOF
35 35
36 36 $ rt
37 37 .
38 38 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
39 39
40 40 failing test
41 41 ==================
42 42
43 43 $ cat > test-failure.t << EOF
44 44 > $ echo babar
45 45 > rataxes
46 46 > This is a noop statement so that
47 47 > this test is still more bytes than success.
48 48 > EOF
49 49
50 50 >>> fh = open('test-failure-unicode.t', 'wb')
51 51 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
52 52 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
53 53
54 54 $ rt
55 55
56 56 --- $TESTTMP/test-failure.t
57 57 +++ $TESTTMP/test-failure.t.err
58 58 @@ -1,4 +1,4 @@
59 59 $ echo babar
60 60 - rataxes
61 61 + babar
62 62 This is a noop statement so that
63 63 this test is still more bytes than success.
64 64
65 65 ERROR: test-failure.t output changed
66 66 !.
67 67 --- $TESTTMP/test-failure-unicode.t
68 68 +++ $TESTTMP/test-failure-unicode.t.err
69 69 @@ -1,2 +1,2 @@
70 70 $ echo babar\xce\xb1 (esc)
71 71 - l\xce\xb5\xce\xb5t (esc)
72 72 + babar\xce\xb1 (esc)
73 73
74 74 ERROR: test-failure-unicode.t output changed
75 75 !
76 76 Failed test-failure.t: output changed
77 77 Failed test-failure-unicode.t: output changed
78 78 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
79 79 python hash seed: * (glob)
80 80 [1]
81 81
82 82 test --xunit support
83 83 $ rt --xunit=xunit.xml
84 84
85 85 --- $TESTTMP/test-failure.t
86 86 +++ $TESTTMP/test-failure.t.err
87 87 @@ -1,4 +1,4 @@
88 88 $ echo babar
89 89 - rataxes
90 90 + babar
91 91 This is a noop statement so that
92 92 this test is still more bytes than success.
93 93
94 94 ERROR: test-failure.t output changed
95 95 !.
96 96 --- $TESTTMP/test-failure-unicode.t
97 97 +++ $TESTTMP/test-failure-unicode.t.err
98 98 @@ -1,2 +1,2 @@
99 99 $ echo babar\xce\xb1 (esc)
100 100 - l\xce\xb5\xce\xb5t (esc)
101 101 + babar\xce\xb1 (esc)
102 102
103 103 ERROR: test-failure-unicode.t output changed
104 104 !
105 105 Failed test-failure.t: output changed
106 106 Failed test-failure-unicode.t: output changed
107 107 # Ran 3 tests, 0 skipped, 0 warned, 2 failed.
108 108 python hash seed: * (glob)
109 109 [1]
110 110 $ cat xunit.xml
111 111 <?xml version="1.0" encoding="utf-8"?>
112 112 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
113 113 <testcase name="test-success.t" time="*"/> (glob)
114 114 <testcase name="test-failure-unicode.t" time="*"> (glob)
115 115 <![CDATA[--- $TESTTMP/test-failure-unicode.t
116 116 +++ $TESTTMP/test-failure-unicode.t.err
117 117 @@ -1,2 +1,2 @@
118 118 $ echo babar\xce\xb1 (esc)
119 119 - l\xce\xb5\xce\xb5t (esc)
120 120 + babar\xce\xb1 (esc)
121 121 ]]> </testcase>
122 122 <testcase name="test-failure.t" time="*"> (glob)
123 123 <![CDATA[--- $TESTTMP/test-failure.t
124 124 +++ $TESTTMP/test-failure.t.err
125 125 @@ -1,4 +1,4 @@
126 126 $ echo babar
127 127 - rataxes
128 128 + babar
129 129 This is a noop statement so that
130 130 this test is still more bytes than success.
131 131 ]]> </testcase>
132 132 </testsuite>
133 133
134 134 $ rm test-failure-unicode.t
135 135
136 136 test for --retest
137 137 ====================
138 138
139 139 $ rt --retest
140 140
141 141 --- $TESTTMP/test-failure.t
142 142 +++ $TESTTMP/test-failure.t.err
143 143 @@ -1,4 +1,4 @@
144 144 $ echo babar
145 145 - rataxes
146 146 + babar
147 147 This is a noop statement so that
148 148 this test is still more bytes than success.
149 149
150 150 ERROR: test-failure.t output changed
151 151 !
152 152 Failed test-failure.t: output changed
153 153 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
154 154 python hash seed: * (glob)
155 155 [1]
156 156
157 157 Selecting Tests To Run
158 158 ======================
159 159
160 160 successful
161 161
162 162 $ rt test-success.t
163 163 .
164 164 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
165 165
166 166 success w/ keyword
167 167 $ rt -k xyzzy
168 168 .
169 169 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
170 170
171 171 failed
172 172
173 173 $ rt test-failure.t
174 174
175 175 --- $TESTTMP/test-failure.t
176 176 +++ $TESTTMP/test-failure.t.err
177 177 @@ -1,4 +1,4 @@
178 178 $ echo babar
179 179 - rataxes
180 180 + babar
181 181 This is a noop statement so that
182 182 this test is still more bytes than success.
183 183
184 184 ERROR: test-failure.t output changed
185 185 !
186 186 Failed test-failure.t: output changed
187 187 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
188 188 python hash seed: * (glob)
189 189 [1]
190 190
191 191 failure w/ keyword
192 192 $ rt -k rataxes
193 193
194 194 --- $TESTTMP/test-failure.t
195 195 +++ $TESTTMP/test-failure.t.err
196 196 @@ -1,4 +1,4 @@
197 197 $ echo babar
198 198 - rataxes
199 199 + babar
200 200 This is a noop statement so that
201 201 this test is still more bytes than success.
202 202
203 203 ERROR: test-failure.t output changed
204 204 !
205 205 Failed test-failure.t: output changed
206 206 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
207 207 python hash seed: * (glob)
208 208 [1]
209 209
210 210 Verify that when a process fails to start we show a useful message
211 211 ==================================================================
212 212
213 213 $ cat > test-serve-fail.t <<EOF
214 214 > $ echo 'abort: child process failed to start blah'
215 215 > EOF
216 216 $ rt test-serve-fail.t
217 217
218 218 ERROR: test-serve-fail.t output changed
219 219 !
220 220 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
221 221 # Ran 1 tests, 0 skipped, 0 warned, 1 failed.
222 222 python hash seed: * (glob)
223 223 [1]
224 224 $ rm test-serve-fail.t
225 225
226 Verify that we can try other ports
227 ===================================
228 $ hg init inuse
229 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
230 $ cat blocks.pid >> $DAEMON_PIDS
231 $ cat > test-serve-inuse.t <<EOF
232 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
233 > $ cat hg.pid >> \$DAEMON_PIDS
234 > EOF
235 $ rt test-serve-inuse.t
236 .
237 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
238 $ rm test-serve-inuse.t
239
226 240 Running In Debug Mode
227 241 ======================
228 242
229 243 $ rt --debug 2>&1 | grep -v pwd
230 244 + echo *SALT* 0 0 (glob)
231 245 *SALT* 0 0 (glob)
232 246 + echo babar
233 247 babar
234 248 + echo *SALT* 4 0 (glob)
235 249 *SALT* 4 0 (glob)
236 250 *+ echo *SALT* 0 0 (glob)
237 251 *SALT* 0 0 (glob)
238 252 + echo babar
239 253 babar
240 254 + echo *SALT* 2 0 (glob)
241 255 *SALT* 2 0 (glob)
242 256 + echo xyzzy
243 257 xyzzy
244 258 + echo *SALT* 6 0 (glob)
245 259 *SALT* 6 0 (glob)
246 260 .
247 261 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
248 262
249 263 Parallel runs
250 264 ==============
251 265
252 266 (duplicate the failing test to get predictable output)
253 267 $ cp test-failure.t test-failure-copy.t
254 268
255 269 $ rt --jobs 2 test-failure*.t -n
256 270 !!
257 271 Failed test-failure*.t: output changed (glob)
258 272 Failed test-failure*.t: output changed (glob)
259 273 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
260 274 python hash seed: * (glob)
261 275 [1]
262 276
263 277 failures in parallel with --first should only print one failure
264 278 >>> f = open('test-nothing.t', 'w')
265 279 >>> f.write('foo\n' * 1024) and None
266 280 >>> f.write(' $ sleep 1') and None
267 281 $ rt --jobs 2 --first
268 282
269 283 --- $TESTTMP/test-failure*.t (glob)
270 284 +++ $TESTTMP/test-failure*.t.err (glob)
271 285 @@ -1,4 +1,4 @@
272 286 $ echo babar
273 287 - rataxes
274 288 + babar
275 289 This is a noop statement so that
276 290 this test is still more bytes than success.
277 291
278 292 Failed test-failure*.t: output changed (glob)
279 293 Failed test-nothing.t: output changed
280 294 # Ran 2 tests, 0 skipped, 0 warned, 2 failed.
281 295 python hash seed: * (glob)
282 296 [1]
283 297
284 298
285 299 (delete the duplicated test file)
286 300 $ rm test-failure-copy.t test-nothing.t
287 301
288 302
289 303 Interactive run
290 304 ===============
291 305
292 306 (backup the failing test)
293 307 $ cp test-failure.t backup
294 308
295 309 Refuse the fix
296 310
297 311 $ echo 'n' | rt -i
298 312
299 313 --- $TESTTMP/test-failure.t
300 314 +++ $TESTTMP/test-failure.t.err
301 315 @@ -1,4 +1,4 @@
302 316 $ echo babar
303 317 - rataxes
304 318 + babar
305 319 This is a noop statement so that
306 320 this test is still more bytes than success.
307 321 Accept this change? [n]
308 322 ERROR: test-failure.t output changed
309 323 !.
310 324 Failed test-failure.t: output changed
311 325 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
312 326 python hash seed: * (glob)
313 327 [1]
314 328
315 329 $ cat test-failure.t
316 330 $ echo babar
317 331 rataxes
318 332 This is a noop statement so that
319 333 this test is still more bytes than success.
320 334
321 335 Interactive with custom view
322 336
323 337 $ echo 'n' | rt -i --view echo
324 338 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
325 339 Accept this change? [n]* (glob)
326 340 ERROR: test-failure.t output changed
327 341 !.
328 342 Failed test-failure.t: output changed
329 343 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
330 344 python hash seed: * (glob)
331 345 [1]
332 346
333 347 View the fix
334 348
335 349 $ echo 'y' | rt --view echo
336 350 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err (glob)
337 351
338 352 ERROR: test-failure.t output changed
339 353 !.
340 354 Failed test-failure.t: output changed
341 355 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
342 356 python hash seed: * (glob)
343 357 [1]
344 358
345 359 Accept the fix
346 360
347 361 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
348 362 $ echo " saved backup bundle to \$TESTTMP/foo.hg" >> test-failure.t
349 363 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
350 364 $ echo " saved backup bundle to \$TESTTMP/foo.hg (glob)" >> test-failure.t
351 365 $ echo " $ echo 'saved backup bundle to \$TESTTMP/foo.hg'" >> test-failure.t
352 366 $ echo " saved backup bundle to \$TESTTMP/*.hg (glob)" >> test-failure.t
353 367 $ echo 'y' | rt -i 2>&1
354 368
355 369 --- $TESTTMP/test-failure.t
356 370 +++ $TESTTMP/test-failure.t.err
357 371 @@ -1,9 +1,9 @@
358 372 $ echo babar
359 373 - rataxes
360 374 + babar
361 375 This is a noop statement so that
362 376 this test is still more bytes than success.
363 377 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
364 378 - saved backup bundle to $TESTTMP/foo.hg
365 379 + saved backup bundle to $TESTTMP/foo.hg* (glob)
366 380 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
367 381 saved backup bundle to $TESTTMP/foo.hg* (glob)
368 382 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
369 383 Accept this change? [n] ..
370 384 # Ran 2 tests, 0 skipped, 0 warned, 0 failed.
371 385
372 386 $ sed -e 's,(glob)$,&<,g' test-failure.t
373 387 $ echo babar
374 388 babar
375 389 This is a noop statement so that
376 390 this test is still more bytes than success.
377 391 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
378 392 saved backup bundle to $TESTTMP/foo.hg (glob)<
379 393 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
380 394 saved backup bundle to $TESTTMP/foo.hg (glob)<
381 395 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
382 396 saved backup bundle to $TESTTMP/*.hg (glob)<
383 397
384 398 (reinstall)
385 399 $ mv backup test-failure.t
386 400
387 401 No Diff
388 402 ===============
389 403
390 404 $ rt --nodiff
391 405 !.
392 406 Failed test-failure.t: output changed
393 407 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
394 408 python hash seed: * (glob)
395 409 [1]
396 410
397 411 test --tmpdir support
398 412 $ rt --tmpdir=$TESTTMP/keep test-success.t
399 413
400 414 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t (glob)
401 415 Keeping threadtmp dir: $TESTTMP/keep/child1 (glob)
402 416 .
403 417 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
404 418
405 419 timeouts
406 420 ========
407 421 $ cat > test-timeout.t <<EOF
408 422 > $ sleep 2
409 423 > $ echo pass
410 424 > pass
411 425 > EOF
412 426 > echo '#require slow' > test-slow-timeout.t
413 427 > cat test-timeout.t >> test-slow-timeout.t
414 428 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
415 429 st
416 430 Skipped test-slow-timeout.t: missing feature: allow slow tests
417 431 Failed test-timeout.t: timed out
418 432 # Ran 1 tests, 1 skipped, 0 warned, 1 failed.
419 433 python hash seed: * (glob)
420 434 [1]
421 435 $ rt --timeout=1 --slowtimeout=3 \
422 436 > test-timeout.t test-slow-timeout.t --allow-slow-tests
423 437 .t
424 438 Failed test-timeout.t: timed out
425 439 # Ran 2 tests, 0 skipped, 0 warned, 1 failed.
426 440 python hash seed: * (glob)
427 441 [1]
428 442 $ rm test-timeout.t test-slow-timeout.t
429 443
430 444 test for --time
431 445 ==================
432 446
433 447 $ rt test-success.t --time
434 448 .
435 449 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
436 450 # Producing time report
437 451 start end cuser csys real Test
438 452 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
439 453
440 454 test for --time with --job enabled
441 455 ====================================
442 456
443 457 $ rt test-success.t --time --jobs 2
444 458 .
445 459 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
446 460 # Producing time report
447 461 start end cuser csys real Test
448 462 \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} \s*[\d\.]{5} test-success.t (re)
449 463
450 464 Skips
451 465 ================
452 466 $ cat > test-skip.t <<EOF
453 467 > $ echo xyzzy
454 468 > #require false
455 469 > EOF
456 470 $ rt --nodiff
457 471 !.s
458 472 Skipped test-skip.t: missing feature: nail clipper
459 473 Failed test-failure.t: output changed
460 474 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
461 475 python hash seed: * (glob)
462 476 [1]
463 477
464 478 $ rt --keyword xyzzy
465 479 .s
466 480 Skipped test-skip.t: missing feature: nail clipper
467 481 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
468 482
469 483 Skips with xml
470 484 $ rt --keyword xyzzy \
471 485 > --xunit=xunit.xml
472 486 .s
473 487 Skipped test-skip.t: missing feature: nail clipper
474 488 # Ran 2 tests, 2 skipped, 0 warned, 0 failed.
475 489 $ cat xunit.xml
476 490 <?xml version="1.0" encoding="utf-8"?>
477 491 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
478 492 <testcase name="test-success.t" time="*"/> (glob)
479 493 </testsuite>
480 494
481 495 Missing skips or blacklisted skips don't count as executed:
482 496 $ echo test-failure.t > blacklist
483 497 $ rt --blacklist=blacklist \
484 498 > test-failure.t test-bogus.t
485 499 ss
486 500 Skipped test-bogus.t: Doesn't exist
487 501 Skipped test-failure.t: blacklisted
488 502 # Ran 0 tests, 2 skipped, 0 warned, 0 failed.
489 503
490 504 #if json
491 505
492 506 test for --json
493 507 ==================
494 508
495 509 $ rt --json
496 510
497 511 --- $TESTTMP/test-failure.t
498 512 +++ $TESTTMP/test-failure.t.err
499 513 @@ -1,4 +1,4 @@
500 514 $ echo babar
501 515 - rataxes
502 516 + babar
503 517 This is a noop statement so that
504 518 this test is still more bytes than success.
505 519
506 520 ERROR: test-failure.t output changed
507 521 !.s
508 522 Skipped test-skip.t: missing feature: nail clipper
509 523 Failed test-failure.t: output changed
510 524 # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
511 525 python hash seed: * (glob)
512 526 [1]
513 527
514 528 $ cat report.json
515 529 testreport ={
516 530 "test-failure.t": [\{] (re)
517 531 "csys": "\s*[\d\.]{4,5}", ? (re)
518 532 "cuser": "\s*[\d\.]{4,5}", ? (re)
519 533 "end": "\s*[\d\.]{4,5}", ? (re)
520 534 "result": "failure", ? (re)
521 535 "start": "\s*[\d\.]{4,5}", ? (re)
522 536 "time": "\s*[\d\.]{4,5}" (re)
523 537 }, ? (re)
524 538 "test-skip.t": {
525 539 "csys": "\s*[\d\.]{4,5}", ? (re)
526 540 "cuser": "\s*[\d\.]{4,5}", ? (re)
527 541 "end": "\s*[\d\.]{4,5}", ? (re)
528 542 "result": "skip", ? (re)
529 543 "start": "\s*[\d\.]{4,5}", ? (re)
530 544 "time": "\s*[\d\.]{4,5}" (re)
531 545 }, ? (re)
532 546 "test-success.t": [\{] (re)
533 547 "csys": "\s*[\d\.]{4,5}", ? (re)
534 548 "cuser": "\s*[\d\.]{4,5}", ? (re)
535 549 "end": "\s*[\d\.]{4,5}", ? (re)
536 550 "result": "success", ? (re)
537 551 "start": "\s*[\d\.]{4,5}", ? (re)
538 552 "time": "\s*[\d\.]{4,5}" (re)
539 553 }
540 554 } (no-eol)
541 555
542 556 Test that failed tests accepted in interactive mode are properly reported:
543 557
544 558 $ cp test-failure.t backup
545 559 $ echo y | rt --json -i
546 560
547 561 --- $TESTTMP/test-failure.t
548 562 +++ $TESTTMP/test-failure.t.err
549 563 @@ -1,4 +1,4 @@
550 564 $ echo babar
551 565 - rataxes
552 566 + babar
553 567 This is a noop statement so that
554 568 this test is still more bytes than success.
555 569 Accept this change? [n] ..s
556 570 Skipped test-skip.t: missing feature: nail clipper
557 571 # Ran 2 tests, 1 skipped, 0 warned, 0 failed.
558 572
559 573 $ cat report.json
560 574 testreport ={
561 575 "test-failure.t": [\{] (re)
562 576 "csys": "\s*[\d\.]{4,5}", ? (re)
563 577 "cuser": "\s*[\d\.]{4,5}", ? (re)
564 578 "end": "\s*[\d\.]{4,5}", ? (re)
565 579 "result": "success", ? (re)
566 580 "start": "\s*[\d\.]{4,5}", ? (re)
567 581 "time": "\s*[\d\.]{4,5}" (re)
568 582 }, ? (re)
569 583 "test-skip.t": {
570 584 "csys": "\s*[\d\.]{4,5}", ? (re)
571 585 "cuser": "\s*[\d\.]{4,5}", ? (re)
572 586 "end": "\s*[\d\.]{4,5}", ? (re)
573 587 "result": "skip", ? (re)
574 588 "start": "\s*[\d\.]{4,5}", ? (re)
575 589 "time": "\s*[\d\.]{4,5}" (re)
576 590 }, ? (re)
577 591 "test-success.t": [\{] (re)
578 592 "csys": "\s*[\d\.]{4,5}", ? (re)
579 593 "cuser": "\s*[\d\.]{4,5}", ? (re)
580 594 "end": "\s*[\d\.]{4,5}", ? (re)
581 595 "result": "success", ? (re)
582 596 "start": "\s*[\d\.]{4,5}", ? (re)
583 597 "time": "\s*[\d\.]{4,5}" (re)
584 598 }
585 599 } (no-eol)
586 600 $ mv backup test-failure.t
587 601
588 602 #endif
589 603
590 604 backslash on end of line with glob matching is handled properly
591 605
592 606 $ cat > test-glob-backslash.t << EOF
593 607 > $ echo 'foo bar \\'
594 608 > foo * \ (glob)
595 609 > EOF
596 610
597 611 $ rt test-glob-backslash.t
598 612 .
599 613 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
600 614
601 615 $ rm -f test-glob-backslash.t
602 616
603 617 Test reusability for third party tools
604 618 ======================================
605 619
606 620 $ mkdir "$TESTTMP"/anothertests
607 621 $ cd "$TESTTMP"/anothertests
608 622
609 623 test that `run-tests.py` can execute hghave, even if it is not run from
610 624 within the Mercurial source tree.
611 625
612 626 $ cat > test-hghave.t <<EOF
613 627 > #require true
614 628 > $ echo foo
615 629 > foo
616 630 > EOF
617 631 $ rt $HGTEST_RUN_TESTS_PURE test-hghave.t
618 632 .
619 633 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
620 634
621 635 test that RUNTESTDIR refers to the directory in which the currently
622 636 running `run-tests.py` is placed.
623 637
624 638 $ cat > test-runtestdir.t <<EOF
625 639 > - $TESTDIR, in which test-run-tests.t is placed
626 640 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
627 641 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
628 642 >
629 643 > #if windows
630 644 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
631 645 > #else
632 646 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
633 647 > #endif
634 648 > $ test "\$RUNTESTDIR" = "$TESTDIR"
635 649 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py
636 650 > #!/usr/bin/env python
637 651 > #
638 652 > # check-code - a style and portability checker for Mercurial
639 653 > EOF
640 654 $ rt $HGTEST_RUN_TESTS_PURE test-runtestdir.t
641 655 .
642 656 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
643 657
644 658 #if execbit
645 659
646 660 test that TESTDIR is included in PATH
647 661
648 662 $ cat > custom-command.sh <<EOF
649 663 > #!/bin/sh
650 664 > echo "hello world"
651 665 > EOF
652 666 $ chmod +x custom-command.sh
653 667 $ cat > test-testdir-path.t <<EOF
654 668 > $ custom-command.sh
655 669 > hello world
656 670 > EOF
657 671 $ rt $HGTEST_RUN_TESTS_PURE test-testdir-path.t
658 672 .
659 673 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
660 674
661 675 #endif
662 676
663 677 test support for --allow-slow-tests
664 678 $ cat > test-very-slow-test.t <<EOF
665 679 > #require slow
666 680 > $ echo pass
667 681 > pass
668 682 > EOF
669 683 $ rt $HGTEST_RUN_TESTS_PURE test-very-slow-test.t
670 684 s
671 685 Skipped test-very-slow-test.t: missing feature: allow slow tests
672 686 # Ran 0 tests, 1 skipped, 0 warned, 0 failed.
673 687 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
674 688 .
675 689 # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
General Comments 0
You need to be logged in to leave comments. Login now