run-tests: fix whitelist/blacklist with directories on Windows...
Matt Harbison
r47984:dff19fe2 stable
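The change below (in parselistfiles) normalizes each whitelist/blacklist entry so it can match the test paths the harness derives from os.path.relpath(). A minimal sketch of the mismatch being fixed, using an invented path: on Windows, relpath() produces backslash-separated paths while list files are usually written with forward slashes, so entries that include a directory never matched.

    import os

    listed = b'testlib/test-example.t'                    # hypothetical blacklist entry
    relpath = os.path.relpath(b'testlib/test-example.t')  # b'testlib\\test-example.t' on Windows
    print(listed == relpath)                    # False on Windows (separators differ); True on POSIX
    print(os.path.normpath(listed) == relpath)  # True on both Windows and POSIX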
@@ -1,3915 +1,3916 b''
1 1 #!/usr/bin/env python3
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import contextlib
51 51 import difflib
52 52 import distutils.version as version
53 53 import errno
54 54 import json
55 55 import multiprocessing
56 56 import os
57 57 import platform
58 58 import random
59 59 import re
60 60 import shutil
61 61 import signal
62 62 import socket
63 63 import subprocess
64 64 import sys
65 65 import sysconfig
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 import unittest
70 70 import uuid
71 71 import xml.dom.minidom as minidom
72 72
73 73 try:
74 74 import Queue as queue
75 75 except ImportError:
76 76 import queue
77 77
78 78 try:
79 79 import shlex
80 80
81 81 shellquote = shlex.quote
82 82 except (ImportError, AttributeError):
83 83 import pipes
84 84
85 85 shellquote = pipes.quote
86 86
87 87 processlock = threading.Lock()
88 88
89 89 pygmentspresent = False
90 90 # ANSI color is unsupported prior to Windows 10
91 91 if os.name != 'nt':
92 92 try: # is pygments installed
93 93 import pygments
94 94 import pygments.lexers as lexers
95 95 import pygments.lexer as lexer
96 96 import pygments.formatters as formatters
97 97 import pygments.token as token
98 98 import pygments.style as style
99 99
100 100 pygmentspresent = True
101 101 difflexer = lexers.DiffLexer()
102 102 terminal256formatter = formatters.Terminal256Formatter()
103 103 except ImportError:
104 104 pass
105 105
106 106 if pygmentspresent:
107 107
108 108 class TestRunnerStyle(style.Style):
109 109 default_style = ""
110 110 skipped = token.string_to_tokentype("Token.Generic.Skipped")
111 111 failed = token.string_to_tokentype("Token.Generic.Failed")
112 112 skippedname = token.string_to_tokentype("Token.Generic.SName")
113 113 failedname = token.string_to_tokentype("Token.Generic.FName")
114 114 styles = {
115 115 skipped: '#e5e5e5',
116 116 skippedname: '#00ffff',
117 117 failed: '#7f0000',
118 118 failedname: '#ff0000',
119 119 }
120 120
121 121 class TestRunnerLexer(lexer.RegexLexer):
122 122 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
123 123 tokens = {
124 124 'root': [
125 125 (r'^Skipped', token.Generic.Skipped, 'skipped'),
126 126 (r'^Failed ', token.Generic.Failed, 'failed'),
127 127 (r'^ERROR: ', token.Generic.Failed, 'failed'),
128 128 ],
129 129 'skipped': [
130 130 (testpattern, token.Generic.SName),
131 131 (r':.*', token.Generic.Skipped),
132 132 ],
133 133 'failed': [
134 134 (testpattern, token.Generic.FName),
135 135 (r'(:| ).*', token.Generic.Failed),
136 136 ],
137 137 }
138 138
139 139 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
140 140 runnerlexer = TestRunnerLexer()
141 141
142 142 origenviron = os.environ.copy()
143 143
144 144 if sys.version_info > (3, 5, 0):
145 145 PYTHON3 = True
146 146 xrange = range # we use xrange in one place, and we'd rather not use range
147 147
148 148 def _sys2bytes(p):
149 149 if p is None:
150 150 return p
151 151 return p.encode('utf-8')
152 152
153 153 def _bytes2sys(p):
154 154 if p is None:
155 155 return p
156 156 return p.decode('utf-8')
157 157
158 158 osenvironb = getattr(os, 'environb', None)
159 159 if osenvironb is None:
160 160 # Windows lacks os.environb, for instance. A proxy over the real thing
161 161 # instead of a copy allows the environment to be updated via bytes on
162 162 # all platforms.
163 163 class environbytes(object):
164 164 def __init__(self, strenv):
165 165 self.__len__ = strenv.__len__
166 166 self.clear = strenv.clear
167 167 self._strenv = strenv
168 168
169 169 def __getitem__(self, k):
170 170 v = self._strenv.__getitem__(_bytes2sys(k))
171 171 return _sys2bytes(v)
172 172
173 173 def __setitem__(self, k, v):
174 174 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
175 175
176 176 def __delitem__(self, k):
177 177 self._strenv.__delitem__(_bytes2sys(k))
178 178
179 179 def __contains__(self, k):
180 180 return self._strenv.__contains__(_bytes2sys(k))
181 181
182 182 def __iter__(self):
183 183 return iter([_sys2bytes(k) for k in iter(self._strenv)])
184 184
185 185 def get(self, k, default=None):
186 186 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
187 187 return _sys2bytes(v)
188 188
189 189 def pop(self, k, default=None):
190 190 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
191 191 return _sys2bytes(v)
192 192
193 193 osenvironb = environbytes(os.environ)
194 194
195 195 getcwdb = getattr(os, 'getcwdb')
196 196 if not getcwdb or os.name == 'nt':
197 197 getcwdb = lambda: _sys2bytes(os.getcwd())
198 198
199 199 elif sys.version_info >= (3, 0, 0):
200 200 print(
201 201 '%s is only supported on Python 3.5+ and 2.7, not %s'
202 202 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
203 203 )
204 204 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
205 205 else:
206 206 PYTHON3 = False
207 207
208 208 # In python 2.x, path operations are generally done using
209 209 # bytestrings by default, so we don't have to do any extra
210 210 # fiddling there. We define the wrapper functions anyway just to
211 211 # help keep code consistent between platforms.
212 212 def _sys2bytes(p):
213 213 return p
214 214
215 215 _bytes2sys = _sys2bytes
216 216 osenvironb = os.environ
217 217 getcwdb = os.getcwd
218 218
219 219 # For Windows support
220 220 wifexited = getattr(os, "WIFEXITED", lambda x: False)
221 221
222 222 # Whether to use IPv6
223 223 def checksocketfamily(name, port=20058):
224 224 """return true if we can listen on localhost using family=name
225 225
226 226 name should be either 'AF_INET', or 'AF_INET6'.
227 227 port being used is okay - EADDRINUSE is considered as successful.
228 228 """
229 229 family = getattr(socket, name, None)
230 230 if family is None:
231 231 return False
232 232 try:
233 233 s = socket.socket(family, socket.SOCK_STREAM)
234 234 s.bind(('localhost', port))
235 235 s.close()
236 236 return True
237 237 except socket.error as exc:
238 238 if exc.errno == errno.EADDRINUSE:
239 239 return True
240 240 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
241 241 return False
242 242 else:
243 243 raise
244 244 else:
245 245 return False
246 246
247 247
248 248 # useipv6 will be set by parseargs
249 249 useipv6 = None
250 250
251 251
252 252 def checkportisavailable(port):
253 253 """return true if a port seems free to bind on localhost"""
254 254 if useipv6:
255 255 family = socket.AF_INET6
256 256 else:
257 257 family = socket.AF_INET
258 258 try:
259 259 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
260 260 s.bind(('localhost', port))
261 261 return True
262 262 except socket.error as exc:
263 263 if os.name == 'nt' and exc.errno == errno.WSAEACCES:
264 264 return False
265 265 elif PYTHON3:
266 266 # TODO: make a proper exception handler after dropping py2. This
267 267 # works because socket.error is an alias for OSError on py3,
268 268 # which is also the baseclass of PermissionError.
269 269 if isinstance(exc, PermissionError):
270 270 return False
271 271 if exc.errno not in (
272 272 errno.EADDRINUSE,
273 273 errno.EADDRNOTAVAIL,
274 274 errno.EPROTONOSUPPORT,
275 275 ):
276 276 raise
277 277 return False
278 278
279 279
280 280 closefds = os.name == 'posix'
281 281
282 282
283 283 def Popen4(cmd, wd, timeout, env=None):
284 284 processlock.acquire()
285 285 p = subprocess.Popen(
286 286 _bytes2sys(cmd),
287 287 shell=True,
288 288 bufsize=-1,
289 289 cwd=_bytes2sys(wd),
290 290 env=env,
291 291 close_fds=closefds,
292 292 stdin=subprocess.PIPE,
293 293 stdout=subprocess.PIPE,
294 294 stderr=subprocess.STDOUT,
295 295 )
296 296 processlock.release()
297 297
298 298 p.fromchild = p.stdout
299 299 p.tochild = p.stdin
300 300 p.childerr = p.stderr
301 301
302 302 p.timeout = False
303 303 if timeout:
304 304
305 305 def t():
306 306 start = time.time()
307 307 while time.time() - start < timeout and p.returncode is None:
308 308 time.sleep(0.1)
309 309 p.timeout = True
310 310 vlog('# Timeout reached for process %d' % p.pid)
311 311 if p.returncode is None:
312 312 terminate(p)
313 313
314 314 threading.Thread(target=t).start()
315 315
316 316 return p
317 317
318 318
319 319 if sys.executable:
320 320 sysexecutable = sys.executable
321 321 elif os.environ.get('PYTHONEXECUTABLE'):
322 322 sysexecutable = os.environ['PYTHONEXECUTABLE']
323 323 elif os.environ.get('PYTHON'):
324 324 sysexecutable = os.environ['PYTHON']
325 325 else:
326 326 raise AssertionError('Could not find Python interpreter')
327 327
328 328 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
329 329 IMPL_PATH = b'PYTHONPATH'
330 330 if 'java' in sys.platform:
331 331 IMPL_PATH = b'JYTHONPATH'
332 332
333 333 default_defaults = {
334 334 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
335 335 'timeout': ('HGTEST_TIMEOUT', 360),
336 336 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
337 337 'port': ('HGTEST_PORT', 20059),
338 338 'shell': ('HGTEST_SHELL', 'sh'),
339 339 }
340 340
341 341 defaults = default_defaults.copy()
342 342
343 343
344 344 def canonpath(path):
345 345 return os.path.realpath(os.path.expanduser(path))
346 346
347 347
348 348 def parselistfiles(files, listtype, warn=True):
349 349 entries = dict()
350 350 for filename in files:
351 351 try:
352 352 path = os.path.expanduser(os.path.expandvars(filename))
353 353 f = open(path, "rb")
354 354 except IOError as err:
355 355 if err.errno != errno.ENOENT:
356 356 raise
357 357 if warn:
358 358 print("warning: no such %s file: %s" % (listtype, filename))
359 359 continue
360 360
361 361 for line in f.readlines():
362 362 line = line.split(b'#', 1)[0].strip()
363 363 if line:
364 entries[line] = filename
364 # Ensure path entries are compatible with os.path.relpath()
365 entries[os.path.normpath(line)] = filename
365 366
366 367 f.close()
367 368 return entries
368 369
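As a usage sketch of the function above (the file name and its contents are hypothetical, not taken from the test suite): parselistfiles() returns a dict keyed by normalized byte paths, mapping each entry to the list file it came from; parseargs() later stores these dicts on options.blacklist and options.whitelisted.

    # blacklists/example.txt contains one test path per line, e.g.:
    #   testlib/test-slow.t   # text after '#' is stripped as a comment
    entries = parselistfiles(['blacklists/example.txt'], 'blacklist', warn=False)
    # On Windows: {b'testlib\\test-slow.t': 'blacklists/example.txt'}
    # On POSIX:   {b'testlib/test-slow.t': 'blacklists/example.txt'}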
369 370
370 371 def parsettestcases(path):
371 372 """read a .t test file, return a set of test case names
372 373
373 374 If path does not exist, return an empty set.
374 375 """
375 376 cases = []
376 377 try:
377 378 with open(path, 'rb') as f:
378 379 for l in f:
379 380 if l.startswith(b'#testcases '):
380 381 cases.append(sorted(l[11:].split()))
381 382 except IOError as ex:
382 383 if ex.errno != errno.ENOENT:
383 384 raise
384 385 return cases
385 386
386 387
387 388 def getparser():
388 389 """Obtain the OptionParser used by the CLI."""
389 390 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
390 391
391 392 selection = parser.add_argument_group('Test Selection')
392 393 selection.add_argument(
393 394 '--allow-slow-tests',
394 395 action='store_true',
395 396 help='allow extremely slow tests',
396 397 )
397 398 selection.add_argument(
398 399 "--blacklist",
399 400 action="append",
400 401 help="skip tests listed in the specified blacklist file",
401 402 )
402 403 selection.add_argument(
403 404 "--changed",
404 405 help="run tests that are changed in parent rev or working directory",
405 406 )
406 407 selection.add_argument(
407 408 "-k", "--keywords", help="run tests matching keywords"
408 409 )
409 410 selection.add_argument(
410 411 "-r", "--retest", action="store_true", help="retest failed tests"
411 412 )
412 413 selection.add_argument(
413 414 "--test-list",
414 415 action="append",
415 416 help="read tests to run from the specified file",
416 417 )
417 418 selection.add_argument(
418 419 "--whitelist",
419 420 action="append",
420 421 help="always run tests listed in the specified whitelist file",
421 422 )
422 423 selection.add_argument(
423 424 'tests', metavar='TESTS', nargs='*', help='Tests to run'
424 425 )
425 426
426 427 harness = parser.add_argument_group('Test Harness Behavior')
427 428 harness.add_argument(
428 429 '--bisect-repo',
429 430 metavar='bisect_repo',
430 431 help=(
431 432 "Path of a repo to bisect. Use together with " "--known-good-rev"
432 433 ),
433 434 )
434 435 harness.add_argument(
435 436 "-d",
436 437 "--debug",
437 438 action="store_true",
438 439 help="debug mode: write output of test scripts to console"
439 440 " rather than capturing and diffing it (disables timeout)",
440 441 )
441 442 harness.add_argument(
442 443 "-f",
443 444 "--first",
444 445 action="store_true",
445 446 help="exit on the first test failure",
446 447 )
447 448 harness.add_argument(
448 449 "-i",
449 450 "--interactive",
450 451 action="store_true",
451 452 help="prompt to accept changed output",
452 453 )
453 454 harness.add_argument(
454 455 "-j",
455 456 "--jobs",
456 457 type=int,
457 458 help="number of jobs to run in parallel"
458 459 " (default: $%s or %d)" % defaults['jobs'],
459 460 )
460 461 harness.add_argument(
461 462 "--keep-tmpdir",
462 463 action="store_true",
463 464 help="keep temporary directory after running tests",
464 465 )
465 466 harness.add_argument(
466 467 '--known-good-rev',
467 468 metavar="known_good_rev",
468 469 help=(
469 470 "Automatically bisect any failures using this "
470 471 "revision as a known-good revision."
471 472 ),
472 473 )
473 474 harness.add_argument(
474 475 "--list-tests",
475 476 action="store_true",
476 477 help="list tests instead of running them",
477 478 )
478 479 harness.add_argument(
479 480 "--loop", action="store_true", help="loop tests repeatedly"
480 481 )
481 482 harness.add_argument(
482 483 '--random', action="store_true", help='run tests in random order'
483 484 )
484 485 harness.add_argument(
485 486 '--order-by-runtime',
486 487 action="store_true",
487 488 help='run slowest tests first, according to .testtimes',
488 489 )
489 490 harness.add_argument(
490 491 "-p",
491 492 "--port",
492 493 type=int,
493 494 help="port on which servers should listen"
494 495 " (default: $%s or %d)" % defaults['port'],
495 496 )
496 497 harness.add_argument(
497 498 '--profile-runner',
498 499 action='store_true',
499 500 help='run statprof on run-tests',
500 501 )
501 502 harness.add_argument(
502 503 "-R", "--restart", action="store_true", help="restart at last error"
503 504 )
504 505 harness.add_argument(
505 506 "--runs-per-test",
506 507 type=int,
507 508 dest="runs_per_test",
508 509 help="run each test N times (default=1)",
509 510 default=1,
510 511 )
511 512 harness.add_argument(
512 513 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
513 514 )
514 515 harness.add_argument(
515 516 '--showchannels', action='store_true', help='show scheduling channels'
516 517 )
517 518 harness.add_argument(
518 519 "--slowtimeout",
519 520 type=int,
520 521 help="kill errant slow tests after SLOWTIMEOUT seconds"
521 522 " (default: $%s or %d)" % defaults['slowtimeout'],
522 523 )
523 524 harness.add_argument(
524 525 "-t",
525 526 "--timeout",
526 527 type=int,
527 528 help="kill errant tests after TIMEOUT seconds"
528 529 " (default: $%s or %d)" % defaults['timeout'],
529 530 )
530 531 harness.add_argument(
531 532 "--tmpdir",
532 533 help="run tests in the given temporary directory"
533 534 " (implies --keep-tmpdir)",
534 535 )
535 536 harness.add_argument(
536 537 "-v", "--verbose", action="store_true", help="output verbose messages"
537 538 )
538 539
539 540 hgconf = parser.add_argument_group('Mercurial Configuration')
540 541 hgconf.add_argument(
541 542 "--chg",
542 543 action="store_true",
543 544 help="install and use chg wrapper in place of hg",
544 545 )
545 546 hgconf.add_argument(
546 547 "--chg-debug",
547 548 action="store_true",
548 549 help="show chg debug logs",
549 550 )
550 551 hgconf.add_argument(
551 552 "--rhg",
552 553 action="store_true",
553 554 help="install and use rhg Rust implementation in place of hg",
554 555 )
555 556 hgconf.add_argument("--compiler", help="compiler to build with")
556 557 hgconf.add_argument(
557 558 '--extra-config-opt',
558 559 action="append",
559 560 default=[],
560 561 help='set the given config opt in the test hgrc',
561 562 )
562 563 hgconf.add_argument(
563 564 "-l",
564 565 "--local",
565 566 action="store_true",
566 567 help="shortcut for --with-hg=<testdir>/../hg, "
567 568 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
568 569 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
569 570 )
570 571 hgconf.add_argument(
571 572 "--ipv6",
572 573 action="store_true",
573 574 help="prefer IPv6 to IPv4 for network related tests",
574 575 )
575 576 hgconf.add_argument(
576 577 "--pure",
577 578 action="store_true",
578 579 help="use pure Python code instead of C extensions",
579 580 )
580 581 hgconf.add_argument(
581 582 "--rust",
582 583 action="store_true",
583 584 help="use Rust code alongside C extensions",
584 585 )
585 586 hgconf.add_argument(
586 587 "--no-rust",
587 588 action="store_true",
588 589 help="do not use Rust code even if compiled",
589 590 )
590 591 hgconf.add_argument(
591 592 "--with-chg",
592 593 metavar="CHG",
593 594 help="use specified chg wrapper in place of hg",
594 595 )
595 596 hgconf.add_argument(
596 597 "--with-rhg",
597 598 metavar="RHG",
598 599 help="use specified rhg Rust implementation in place of hg",
599 600 )
600 601 hgconf.add_argument(
601 602 "--with-hg",
602 603 metavar="HG",
603 604 help="test using specified hg script rather than a "
604 605 "temporary installation",
605 606 )
606 607
607 608 reporting = parser.add_argument_group('Results Reporting')
608 609 reporting.add_argument(
609 610 "-C",
610 611 "--annotate",
611 612 action="store_true",
612 613 help="output files annotated with coverage",
613 614 )
614 615 reporting.add_argument(
615 616 "--color",
616 617 choices=["always", "auto", "never"],
617 618 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
618 619 help="colorisation: always|auto|never (default: auto)",
619 620 )
620 621 reporting.add_argument(
621 622 "-c",
622 623 "--cover",
623 624 action="store_true",
624 625 help="print a test coverage report",
625 626 )
626 627 reporting.add_argument(
627 628 '--exceptions',
628 629 action='store_true',
629 630 help='log all exceptions and generate an exception report',
630 631 )
631 632 reporting.add_argument(
632 633 "-H",
633 634 "--htmlcov",
634 635 action="store_true",
635 636 help="create an HTML report of the coverage of the files",
636 637 )
637 638 reporting.add_argument(
638 639 "--json",
639 640 action="store_true",
640 641 help="store test result data in 'report.json' file",
641 642 )
642 643 reporting.add_argument(
643 644 "--outputdir",
644 645 help="directory to write error logs to (default=test directory)",
645 646 )
646 647 reporting.add_argument(
647 648 "-n", "--nodiff", action="store_true", help="skip showing test changes"
648 649 )
649 650 reporting.add_argument(
650 651 "-S",
651 652 "--noskips",
652 653 action="store_true",
653 654 help="don't report skipped tests verbosely",
654 655 )
655 656 reporting.add_argument(
656 657 "--time", action="store_true", help="time how long each test takes"
657 658 )
658 659 reporting.add_argument("--view", help="external diff viewer")
659 660 reporting.add_argument(
660 661 "--xunit", help="record xunit results at specified path"
661 662 )
662 663
663 664 for option, (envvar, default) in defaults.items():
664 665 defaults[option] = type(default)(os.environ.get(envvar, default))
665 666 parser.set_defaults(**defaults)
666 667
667 668 return parser
668 669
669 670
670 671 def parseargs(args, parser):
671 672 """Parse arguments with our OptionParser and validate results."""
672 673 options = parser.parse_args(args)
673 674
674 675 # jython is always pure
675 676 if 'java' in sys.platform or '__pypy__' in sys.modules:
676 677 options.pure = True
677 678
678 679 if platform.python_implementation() != 'CPython' and options.rust:
679 680 parser.error('Rust extensions are only available with CPython')
680 681
681 682 if options.pure and options.rust:
682 683 parser.error('--rust cannot be used with --pure')
683 684
684 685 if options.rust and options.no_rust:
685 686 parser.error('--rust cannot be used with --no-rust')
686 687
687 688 if options.local:
688 689 if options.with_hg or options.with_rhg or options.with_chg:
689 690 parser.error(
690 691 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
691 692 )
692 693 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
693 694 reporootdir = os.path.dirname(testdir)
694 695 pathandattrs = [(b'hg', 'with_hg')]
695 696 if options.chg:
696 697 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
697 698 if options.rhg:
698 699 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
699 700 for relpath, attr in pathandattrs:
700 701 binpath = os.path.join(reporootdir, relpath)
701 702 if os.name != 'nt' and not os.access(binpath, os.X_OK):
702 703 parser.error(
703 704 '--local specified, but %r not found or '
704 705 'not executable' % binpath
705 706 )
706 707 setattr(options, attr, _bytes2sys(binpath))
707 708
708 709 if options.with_hg:
709 710 options.with_hg = canonpath(_sys2bytes(options.with_hg))
710 711 if not (
711 712 os.path.isfile(options.with_hg)
712 713 and os.access(options.with_hg, os.X_OK)
713 714 ):
714 715 parser.error('--with-hg must specify an executable hg script')
715 716 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
716 717 sys.stderr.write('warning: --with-hg should specify an hg script\n')
717 718 sys.stderr.flush()
718 719
719 720 if (options.chg or options.with_chg) and os.name == 'nt':
720 721 parser.error('chg does not work on %s' % os.name)
721 722 if (options.rhg or options.with_rhg) and os.name == 'nt':
722 723 parser.error('rhg does not work on %s' % os.name)
723 724 if options.with_chg:
724 725 options.chg = False # no installation to temporary location
725 726 options.with_chg = canonpath(_sys2bytes(options.with_chg))
726 727 if not (
727 728 os.path.isfile(options.with_chg)
728 729 and os.access(options.with_chg, os.X_OK)
729 730 ):
730 731 parser.error('--with-chg must specify a chg executable')
731 732 if options.with_rhg:
732 733 options.rhg = False # no installation to temporary location
733 734 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
734 735 if not (
735 736 os.path.isfile(options.with_rhg)
736 737 and os.access(options.with_rhg, os.X_OK)
737 738 ):
738 739 parser.error('--with-rhg must specify a rhg executable')
739 740 if options.chg and options.with_hg:
740 741 # chg shares installation location with hg
741 742 parser.error(
742 743 '--chg does not work when --with-hg is specified '
743 744 '(use --with-chg instead)'
744 745 )
745 746 if options.rhg and options.with_hg:
746 747 # rhg shares installation location with hg
747 748 parser.error(
748 749 '--rhg does not work when --with-hg is specified '
749 750 '(use --with-rhg instead)'
750 751 )
751 752 if options.rhg and options.chg:
752 753 parser.error('--rhg and --chg do not work together')
753 754
754 755 if options.color == 'always' and not pygmentspresent:
755 756 sys.stderr.write(
756 757 'warning: --color=always ignored because '
757 758 'pygments is not installed\n'
758 759 )
759 760
760 761 if options.bisect_repo and not options.known_good_rev:
761 762 parser.error("--bisect-repo cannot be used without --known-good-rev")
762 763
763 764 global useipv6
764 765 if options.ipv6:
765 766 useipv6 = checksocketfamily('AF_INET6')
766 767 else:
767 768 # only use IPv6 if IPv4 is unavailable and IPv6 is available
768 769 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
769 770 'AF_INET6'
770 771 )
771 772
772 773 options.anycoverage = options.cover or options.annotate or options.htmlcov
773 774 if options.anycoverage:
774 775 try:
775 776 import coverage
776 777
777 778 covver = version.StrictVersion(coverage.__version__).version
778 779 if covver < (3, 3):
779 780 parser.error('coverage options require coverage 3.3 or later')
780 781 except ImportError:
781 782 parser.error('coverage options now require the coverage package')
782 783
783 784 if options.anycoverage and options.local:
784 785 # this needs some path mangling somewhere, I guess
785 786 parser.error(
786 787 "sorry, coverage options do not work when --local " "is specified"
787 788 )
788 789
789 790 if options.anycoverage and options.with_hg:
790 791 parser.error(
791 792 "sorry, coverage options do not work when --with-hg " "is specified"
792 793 )
793 794
794 795 global verbose
795 796 if options.verbose:
796 797 verbose = ''
797 798
798 799 if options.tmpdir:
799 800 options.tmpdir = canonpath(options.tmpdir)
800 801
801 802 if options.jobs < 1:
802 803 parser.error('--jobs must be positive')
803 804 if options.interactive and options.debug:
804 805 parser.error("-i/--interactive and -d/--debug are incompatible")
805 806 if options.debug:
806 807 if options.timeout != defaults['timeout']:
807 808 sys.stderr.write('warning: --timeout option ignored with --debug\n')
808 809 if options.slowtimeout != defaults['slowtimeout']:
809 810 sys.stderr.write(
810 811 'warning: --slowtimeout option ignored with --debug\n'
811 812 )
812 813 options.timeout = 0
813 814 options.slowtimeout = 0
814 815
815 816 if options.blacklist:
816 817 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
817 818 if options.whitelist:
818 819 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
819 820 else:
820 821 options.whitelisted = {}
821 822
822 823 if options.showchannels:
823 824 options.nodiff = True
824 825
825 826 return options
826 827
827 828
828 829 def rename(src, dst):
829 830 """Like os.rename(), trade atomicity and opened files friendliness
830 831 for existing destination support.
831 832 """
832 833 shutil.copy(src, dst)
833 834 os.remove(src)
834 835
835 836
836 837 def makecleanable(path):
837 838 """Try to fix directory permission recursively so that the entire tree
838 839 can be deleted"""
839 840 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
840 841 for d in dirnames:
841 842 p = os.path.join(dirpath, d)
842 843 try:
843 844 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
844 845 except OSError:
845 846 pass
846 847
847 848
848 849 _unified_diff = difflib.unified_diff
849 850 if PYTHON3:
850 851 import functools
851 852
852 853 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
853 854
854 855
855 856 def getdiff(expected, output, ref, err):
856 857 servefail = False
857 858 lines = []
858 859 for line in _unified_diff(expected, output, ref, err):
859 860 if line.startswith(b'+++') or line.startswith(b'---'):
860 861 line = line.replace(b'\\', b'/')
861 862 if line.endswith(b' \n'):
862 863 line = line[:-2] + b'\n'
863 864 lines.append(line)
864 865 if not servefail and line.startswith(
865 866 b'+ abort: child process failed to start'
866 867 ):
867 868 servefail = True
868 869
869 870 return servefail, lines
870 871
871 872
872 873 verbose = False
873 874
874 875
875 876 def vlog(*msg):
876 877 """Log only when in verbose mode."""
877 878 if verbose is False:
878 879 return
879 880
880 881 return log(*msg)
881 882
882 883
883 884 # Bytes that break XML even in a CDATA block: control characters 0-31
884 885 # sans \t, \n and \r
885 886 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
886 887
887 888 # Match feature conditionalized output lines in the form, capturing the feature
888 889 # list in group 2, and the preceding line output in group 1:
889 890 #
890 891 # output..output (feature !)\n
891 892 optline = re.compile(br'(.*) \((.+?) !\)\n$')
892 893
893 894
894 895 def cdatasafe(data):
895 896 """Make a string safe to include in a CDATA block.
896 897
897 898 Certain control characters are illegal in a CDATA block, and
898 899 there's no way to include a ]]> in a CDATA either. This function
899 900 replaces illegal bytes with ? and adds a space between the ]] so
900 901 that it won't break the CDATA block.
901 902 """
902 903 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
903 904
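For instance (input bytes invented for illustration), the substitution described in the docstring behaves like this:

    print(cdatasafe(b'ok\x07]]>done'))   # -> b'ok?] ]>done'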
904 905
905 906 def log(*msg):
906 907 """Log something to stdout.
907 908
908 909 Arguments are strings to print.
909 910 """
910 911 with iolock:
911 912 if verbose:
912 913 print(verbose, end=' ')
913 914 for m in msg:
914 915 print(m, end=' ')
915 916 print()
916 917 sys.stdout.flush()
917 918
918 919
919 920 def highlightdiff(line, color):
920 921 if not color:
921 922 return line
922 923 assert pygmentspresent
923 924 return pygments.highlight(
924 925 line.decode('latin1'), difflexer, terminal256formatter
925 926 ).encode('latin1')
926 927
927 928
928 929 def highlightmsg(msg, color):
929 930 if not color:
930 931 return msg
931 932 assert pygmentspresent
932 933 return pygments.highlight(msg, runnerlexer, runnerformatter)
933 934
934 935
935 936 def terminate(proc):
936 937 """Terminate subprocess"""
937 938 vlog('# Terminating process %d' % proc.pid)
938 939 try:
939 940 proc.terminate()
940 941 except OSError:
941 942 pass
942 943
943 944
944 945 def killdaemons(pidfile):
945 946 import killdaemons as killmod
946 947
947 948 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
948 949
949 950
950 951 class Test(unittest.TestCase):
951 952 """Encapsulates a single, runnable test.
952 953
953 954 While this class conforms to the unittest.TestCase API, it differs in that
954 955 instances need to be instantiated manually. (Typically, unittest.TestCase
955 956 classes are instantiated automatically by scanning modules.)
956 957 """
957 958
958 959 # Status code reserved for skipped tests (used by hghave).
959 960 SKIPPED_STATUS = 80
960 961
961 962 def __init__(
962 963 self,
963 964 path,
964 965 outputdir,
965 966 tmpdir,
966 967 keeptmpdir=False,
967 968 debug=False,
968 969 first=False,
969 970 timeout=None,
970 971 startport=None,
971 972 extraconfigopts=None,
972 973 shell=None,
973 974 hgcommand=None,
974 975 slowtimeout=None,
975 976 usechg=False,
976 977 chgdebug=False,
977 978 useipv6=False,
978 979 ):
979 980 """Create a test from parameters.
980 981
981 982 path is the full path to the file defining the test.
982 983
983 984 tmpdir is the main temporary directory to use for this test.
984 985
985 986 keeptmpdir determines whether to keep the test's temporary directory
986 987 after execution. It defaults to removal (False).
987 988
988 989 debug mode will make the test execute verbosely, with unfiltered
989 990 output.
990 991
991 992 timeout controls the maximum run time of the test. It is ignored when
992 993 debug is True. See slowtimeout for tests with #require slow.
993 994
994 995 slowtimeout overrides timeout if the test has #require slow.
995 996
996 997 startport controls the starting port number to use for this test. Each
997 998 test will reserve 3 port numbers for execution. It is the caller's
998 999 responsibility to allocate a non-overlapping port range to Test
999 1000 instances.
1000 1001
1001 1002 extraconfigopts is an iterable of extra hgrc config options. Values
1002 1003 must have the form "key=value" (something understood by hgrc). Values
1003 1004 of the form "foo.key=value" will result in "[foo] key=value".
1004 1005
1005 1006 shell is the shell to execute tests in.
1006 1007 """
1007 1008 if timeout is None:
1008 1009 timeout = defaults['timeout']
1009 1010 if startport is None:
1010 1011 startport = defaults['port']
1011 1012 if slowtimeout is None:
1012 1013 slowtimeout = defaults['slowtimeout']
1013 1014 self.path = path
1014 1015 self.relpath = os.path.relpath(path)
1015 1016 self.bname = os.path.basename(path)
1016 1017 self.name = _bytes2sys(self.bname)
1017 1018 self._testdir = os.path.dirname(path)
1018 1019 self._outputdir = outputdir
1019 1020 self._tmpname = os.path.basename(path)
1020 1021 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1021 1022
1022 1023 self._threadtmp = tmpdir
1023 1024 self._keeptmpdir = keeptmpdir
1024 1025 self._debug = debug
1025 1026 self._first = first
1026 1027 self._timeout = timeout
1027 1028 self._slowtimeout = slowtimeout
1028 1029 self._startport = startport
1029 1030 self._extraconfigopts = extraconfigopts or []
1030 1031 self._shell = _sys2bytes(shell)
1031 1032 self._hgcommand = hgcommand or b'hg'
1032 1033 self._usechg = usechg
1033 1034 self._chgdebug = chgdebug
1034 1035 self._useipv6 = useipv6
1035 1036
1036 1037 self._aborted = False
1037 1038 self._daemonpids = []
1038 1039 self._finished = None
1039 1040 self._ret = None
1040 1041 self._out = None
1041 1042 self._skipped = None
1042 1043 self._testtmp = None
1043 1044 self._chgsockdir = None
1044 1045
1045 1046 self._refout = self.readrefout()
1046 1047
1047 1048 def readrefout(self):
1048 1049 """read reference output"""
1049 1050 # If we're not in --debug mode and reference output file exists,
1050 1051 # check test output against it.
1051 1052 if self._debug:
1052 1053 return None # to match "out is None"
1053 1054 elif os.path.exists(self.refpath):
1054 1055 with open(self.refpath, 'rb') as f:
1055 1056 return f.read().splitlines(True)
1056 1057 else:
1057 1058 return []
1058 1059
1059 1060 # needed to get base class __repr__ running
1060 1061 @property
1061 1062 def _testMethodName(self):
1062 1063 return self.name
1063 1064
1064 1065 def __str__(self):
1065 1066 return self.name
1066 1067
1067 1068 def shortDescription(self):
1068 1069 return self.name
1069 1070
1070 1071 def setUp(self):
1071 1072 """Tasks to perform before run()."""
1072 1073 self._finished = False
1073 1074 self._ret = None
1074 1075 self._out = None
1075 1076 self._skipped = None
1076 1077
1077 1078 try:
1078 1079 os.mkdir(self._threadtmp)
1079 1080 except OSError as e:
1080 1081 if e.errno != errno.EEXIST:
1081 1082 raise
1082 1083
1083 1084 name = self._tmpname
1084 1085 self._testtmp = os.path.join(self._threadtmp, name)
1085 1086 os.mkdir(self._testtmp)
1086 1087
1087 1088 # Remove any previous output files.
1088 1089 if os.path.exists(self.errpath):
1089 1090 try:
1090 1091 os.remove(self.errpath)
1091 1092 except OSError as e:
1092 1093 # We might have raced another test to clean up a .err
1093 1094 # file, so ignore ENOENT when removing a previous .err
1094 1095 # file.
1095 1096 if e.errno != errno.ENOENT:
1096 1097 raise
1097 1098
1098 1099 if self._usechg:
1099 1100 self._chgsockdir = os.path.join(
1100 1101 self._threadtmp, b'%s.chgsock' % name
1101 1102 )
1102 1103 os.mkdir(self._chgsockdir)
1103 1104
1104 1105 def run(self, result):
1105 1106 """Run this test and report results against a TestResult instance."""
1106 1107 # This function is extremely similar to unittest.TestCase.run(). Once
1107 1108 # we require Python 2.7 (or at least its version of unittest), this
1108 1109 # function can largely go away.
1109 1110 self._result = result
1110 1111 result.startTest(self)
1111 1112 try:
1112 1113 try:
1113 1114 self.setUp()
1114 1115 except (KeyboardInterrupt, SystemExit):
1115 1116 self._aborted = True
1116 1117 raise
1117 1118 except Exception:
1118 1119 result.addError(self, sys.exc_info())
1119 1120 return
1120 1121
1121 1122 success = False
1122 1123 try:
1123 1124 self.runTest()
1124 1125 except KeyboardInterrupt:
1125 1126 self._aborted = True
1126 1127 raise
1127 1128 except unittest.SkipTest as e:
1128 1129 result.addSkip(self, str(e))
1129 1130 # The base class will have already counted this as a
1130 1131 # test we "ran", but we want to exclude skipped tests
1131 1132 # from the count of tests actually run.
1132 1133 result.testsRun -= 1
1133 1134 except self.failureException as e:
1134 1135 # This differs from unittest in that we don't capture
1135 1136 # the stack trace. This is for historical reasons and
1136 1137 # this decision could be revisited in the future,
1137 1138 # especially for PythonTest instances.
1138 1139 if result.addFailure(self, str(e)):
1139 1140 success = True
1140 1141 except Exception:
1141 1142 result.addError(self, sys.exc_info())
1142 1143 else:
1143 1144 success = True
1144 1145
1145 1146 try:
1146 1147 self.tearDown()
1147 1148 except (KeyboardInterrupt, SystemExit):
1148 1149 self._aborted = True
1149 1150 raise
1150 1151 except Exception:
1151 1152 result.addError(self, sys.exc_info())
1152 1153 success = False
1153 1154
1154 1155 if success:
1155 1156 result.addSuccess(self)
1156 1157 finally:
1157 1158 result.stopTest(self, interrupted=self._aborted)
1158 1159
1159 1160 def runTest(self):
1160 1161 """Run this test instance.
1161 1162
1162 1163 This will return a tuple describing the result of the test.
1163 1164 """
1164 1165 env = self._getenv()
1165 1166 self._genrestoreenv(env)
1166 1167 self._daemonpids.append(env['DAEMON_PIDS'])
1167 1168 self._createhgrc(env['HGRCPATH'])
1168 1169
1169 1170 vlog('# Test', self.name)
1170 1171
1171 1172 ret, out = self._run(env)
1172 1173 self._finished = True
1173 1174 self._ret = ret
1174 1175 self._out = out
1175 1176
1176 1177 def describe(ret):
1177 1178 if ret < 0:
1178 1179 return 'killed by signal: %d' % -ret
1179 1180 return 'returned error code %d' % ret
1180 1181
1181 1182 self._skipped = False
1182 1183
1183 1184 if ret == self.SKIPPED_STATUS:
1184 1185 if out is None: # Debug mode, nothing to parse.
1185 1186 missing = ['unknown']
1186 1187 failed = None
1187 1188 else:
1188 1189 missing, failed = TTest.parsehghaveoutput(out)
1189 1190
1190 1191 if not missing:
1191 1192 missing = ['skipped']
1192 1193
1193 1194 if failed:
1194 1195 self.fail('hg have failed checking for %s' % failed[-1])
1195 1196 else:
1196 1197 self._skipped = True
1197 1198 raise unittest.SkipTest(missing[-1])
1198 1199 elif ret == 'timeout':
1199 1200 self.fail('timed out')
1200 1201 elif ret is False:
1201 1202 self.fail('no result code from test')
1202 1203 elif out != self._refout:
1203 1204 # Diff generation may rely on written .err file.
1204 1205 if (
1205 1206 (ret != 0 or out != self._refout)
1206 1207 and not self._skipped
1207 1208 and not self._debug
1208 1209 ):
1209 1210 with open(self.errpath, 'wb') as f:
1210 1211 for line in out:
1211 1212 f.write(line)
1212 1213
1213 1214 # The result object handles diff calculation for us.
1214 1215 with firstlock:
1215 1216 if self._result.addOutputMismatch(self, ret, out, self._refout):
1216 1217 # change was accepted, skip failing
1217 1218 return
1218 1219 if self._first:
1219 1220 global firsterror
1220 1221 firsterror = True
1221 1222
1222 1223 if ret:
1223 1224 msg = 'output changed and ' + describe(ret)
1224 1225 else:
1225 1226 msg = 'output changed'
1226 1227
1227 1228 self.fail(msg)
1228 1229 elif ret:
1229 1230 self.fail(describe(ret))
1230 1231
1231 1232 def tearDown(self):
1232 1233 """Tasks to perform after run()."""
1233 1234 for entry in self._daemonpids:
1234 1235 killdaemons(entry)
1235 1236 self._daemonpids = []
1236 1237
1237 1238 if self._keeptmpdir:
1238 1239 log(
1239 1240 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1240 1241 % (
1241 1242 _bytes2sys(self._testtmp),
1242 1243 _bytes2sys(self._threadtmp),
1243 1244 )
1244 1245 )
1245 1246 else:
1246 1247 try:
1247 1248 shutil.rmtree(self._testtmp)
1248 1249 except OSError:
1249 1250 # unreadable directory may be left in $TESTTMP; fix permission
1250 1251 # and try again
1251 1252 makecleanable(self._testtmp)
1252 1253 shutil.rmtree(self._testtmp, True)
1253 1254 shutil.rmtree(self._threadtmp, True)
1254 1255
1255 1256 if self._usechg:
1256 1257 # chgservers will stop automatically after they find the socket
1257 1258 # files are deleted
1258 1259 shutil.rmtree(self._chgsockdir, True)
1259 1260
1260 1261 if (
1261 1262 (self._ret != 0 or self._out != self._refout)
1262 1263 and not self._skipped
1263 1264 and not self._debug
1264 1265 and self._out
1265 1266 ):
1266 1267 with open(self.errpath, 'wb') as f:
1267 1268 for line in self._out:
1268 1269 f.write(line)
1269 1270
1270 1271 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1271 1272
1272 1273 def _run(self, env):
1273 1274 # This should be implemented in child classes to run tests.
1274 1275 raise unittest.SkipTest('unknown test type')
1275 1276
1276 1277 def abort(self):
1277 1278 """Terminate execution of this test."""
1278 1279 self._aborted = True
1279 1280
1280 1281 def _portmap(self, i):
1281 1282 offset = b'' if i == 0 else b'%d' % i
1282 1283 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1283 1284
1284 1285 def _getreplacements(self):
1285 1286 """Obtain a mapping of text replacements to apply to test output.
1286 1287
1287 1288 Test output needs to be normalized so it can be compared to expected
1288 1289 output. This function defines how some of that normalization will
1289 1290 occur.
1290 1291 """
1291 1292 r = [
1292 1293 # This list should be parallel to defineport in _getenv
1293 1294 self._portmap(0),
1294 1295 self._portmap(1),
1295 1296 self._portmap(2),
1296 1297 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1297 1298 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1298 1299 ]
1299 1300 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1300 1301
1301 1302 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1302 1303
1303 1304 if os.path.exists(replacementfile):
1304 1305 data = {}
1305 1306 with open(replacementfile, mode='rb') as source:
1306 1307 # the intermediate 'compile' step helps with debugging
1307 1308 code = compile(source.read(), replacementfile, 'exec')
1308 1309 exec(code, data)
1309 1310 for value in data.get('substitutions', ()):
1310 1311 if len(value) != 2:
1311 1312 msg = 'malformatted substitution in %s: %r'
1312 1313 msg %= (replacementfile, value)
1313 1314 raise ValueError(msg)
1314 1315 r.append(value)
1315 1316 return r
1316 1317
1317 1318 def _escapepath(self, p):
1318 1319 if os.name == 'nt':
1319 1320 return b''.join(
1320 1321 c.isalpha()
1321 1322 and b'[%s%s]' % (c.lower(), c.upper())
1322 1323 or c in b'/\\'
1323 1324 and br'[/\\]'
1324 1325 or c.isdigit()
1325 1326 and c
1326 1327 or b'\\' + c
1327 1328 for c in [p[i : i + 1] for i in range(len(p))]
1328 1329 )
1329 1330 else:
1330 1331 return re.escape(p)
1331 1332
1332 1333 def _localip(self):
1333 1334 if self._useipv6:
1334 1335 return b'::1'
1335 1336 else:
1336 1337 return b'127.0.0.1'
1337 1338
1338 1339 def _genrestoreenv(self, testenv):
1339 1340 """Generate a script that can be used by tests to restore the original
1340 1341 environment."""
1341 1342 # Put the restoreenv script inside self._threadtmp
1342 1343 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1343 1344 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1344 1345
1345 1346 # Only restore environment variable names that the shell allows
1346 1347 # us to export.
1347 1348 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1348 1349
1349 1350 # Do not restore these variables; otherwise tests would fail.
1350 1351 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1351 1352
1352 1353 with open(scriptpath, 'w') as envf:
1353 1354 for name, value in origenviron.items():
1354 1355 if not name_regex.match(name):
1355 1356 # Skip environment variables with unusual names not
1356 1357 # allowed by most shells.
1357 1358 continue
1358 1359 if name in reqnames:
1359 1360 continue
1360 1361 envf.write('%s=%s\n' % (name, shellquote(value)))
1361 1362
1362 1363 for name in testenv:
1363 1364 if name in origenviron or name in reqnames:
1364 1365 continue
1365 1366 envf.write('unset %s\n' % (name,))
1366 1367
1367 1368 def _getenv(self):
1368 1369 """Obtain environment variables to use during test execution."""
1369 1370
1370 1371 def defineport(i):
1371 1372 offset = '' if i == 0 else '%s' % i
1372 1373 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1373 1374
1374 1375 env = os.environ.copy()
1375 1376 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1376 1377 env['HGEMITWARNINGS'] = '1'
1377 1378 env['TESTTMP'] = _bytes2sys(self._testtmp)
1378 1379 env['TESTNAME'] = self.name
1379 1380 env['HOME'] = _bytes2sys(self._testtmp)
1380 1381 if os.name == 'nt':
1381 1382 env['REALUSERPROFILE'] = env['USERPROFILE']
1382 1383 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1383 1384 env['USERPROFILE'] = env['HOME']
1384 1385 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1385 1386 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1386 1387 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1387 1388 # This number should match portneeded in _getport
1388 1389 for port in xrange(3):
1389 1390 # This list should be parallel to _portmap in _getreplacements
1390 1391 defineport(port)
1391 1392 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1392 1393 env["DAEMON_PIDS"] = _bytes2sys(
1393 1394 os.path.join(self._threadtmp, b'daemon.pids')
1394 1395 )
1395 1396 env["HGEDITOR"] = (
1396 1397 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1397 1398 )
1398 1399 env["HGUSER"] = "test"
1399 1400 env["HGENCODING"] = "ascii"
1400 1401 env["HGENCODINGMODE"] = "strict"
1401 1402 env["HGHOSTNAME"] = "test-hostname"
1402 1403 env['HGIPV6'] = str(int(self._useipv6))
1403 1404 # See contrib/catapipe.py for how to use this functionality.
1404 1405 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1405 1406 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1406 1407 # non-test one in as a default, otherwise set to devnull
1407 1408 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1408 1409 'HGCATAPULTSERVERPIPE', os.devnull
1409 1410 )
1410 1411
1411 1412 extraextensions = []
1412 1413 for opt in self._extraconfigopts:
1413 1414 section, key = opt.split('.', 1)
1414 1415 if section != 'extensions':
1415 1416 continue
1416 1417 name = key.split('=', 1)[0]
1417 1418 extraextensions.append(name)
1418 1419
1419 1420 if extraextensions:
1420 1421 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1421 1422
1422 1423 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1423 1424 # IP addresses.
1424 1425 env['LOCALIP'] = _bytes2sys(self._localip())
1425 1426
1426 1427 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1427 1428 # but this is needed for testing python instances like dummyssh,
1428 1429 # dummysmtpd.py, and dumbhttp.py.
1429 1430 if PYTHON3 and os.name == 'nt':
1430 1431 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1431 1432
1432 1433 # Modified HOME in test environment can confuse Rust tools. So set
1433 1434 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1434 1435 # present and these variables aren't already defined.
1435 1436 cargo_home_path = os.path.expanduser('~/.cargo')
1436 1437 rustup_home_path = os.path.expanduser('~/.rustup')
1437 1438
1438 1439 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1439 1440 env['CARGO_HOME'] = cargo_home_path
1440 1441 if (
1441 1442 os.path.exists(rustup_home_path)
1442 1443 and b'RUSTUP_HOME' not in osenvironb
1443 1444 ):
1444 1445 env['RUSTUP_HOME'] = rustup_home_path
1445 1446
1446 1447 # Reset some environment variables to well-known values so that
1447 1448 # the tests produce repeatable output.
1448 1449 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1449 1450 env['TZ'] = 'GMT'
1450 1451 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1451 1452 env['COLUMNS'] = '80'
1452 1453 env['TERM'] = 'xterm'
1453 1454
1454 1455 dropped = [
1455 1456 'CDPATH',
1456 1457 'CHGDEBUG',
1457 1458 'EDITOR',
1458 1459 'GREP_OPTIONS',
1459 1460 'HG',
1460 1461 'HGMERGE',
1461 1462 'HGPLAIN',
1462 1463 'HGPLAINEXCEPT',
1463 1464 'HGPROF',
1464 1465 'http_proxy',
1465 1466 'no_proxy',
1466 1467 'NO_PROXY',
1467 1468 'PAGER',
1468 1469 'VISUAL',
1469 1470 ]
1470 1471
1471 1472 for k in dropped:
1472 1473 if k in env:
1473 1474 del env[k]
1474 1475
1475 1476 # unset env related to hooks
1476 1477 for k in list(env):
1477 1478 if k.startswith('HG_'):
1478 1479 del env[k]
1479 1480
1480 1481 if self._usechg:
1481 1482 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1482 1483 if self._chgdebug:
1483 1484 env['CHGDEBUG'] = 'true'
1484 1485
1485 1486 return env
1486 1487
1487 1488 def _createhgrc(self, path):
1488 1489 """Create an hgrc file for this test."""
1489 1490 with open(path, 'wb') as hgrc:
1490 1491 hgrc.write(b'[ui]\n')
1491 1492 hgrc.write(b'slash = True\n')
1492 1493 hgrc.write(b'interactive = False\n')
1493 1494 hgrc.write(b'detailed-exit-code = True\n')
1494 1495 hgrc.write(b'merge = internal:merge\n')
1495 1496 hgrc.write(b'mergemarkers = detailed\n')
1496 1497 hgrc.write(b'promptecho = True\n')
1497 1498 hgrc.write(b'timeout.warn=15\n')
1498 1499 hgrc.write(b'[defaults]\n')
1499 1500 hgrc.write(b'[devel]\n')
1500 1501 hgrc.write(b'all-warnings = true\n')
1501 1502 hgrc.write(b'default-date = 0 0\n')
1502 1503 hgrc.write(b'[largefiles]\n')
1503 1504 hgrc.write(
1504 1505 b'usercache = %s\n'
1505 1506 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1506 1507 )
1507 1508 hgrc.write(b'[lfs]\n')
1508 1509 hgrc.write(
1509 1510 b'usercache = %s\n'
1510 1511 % (os.path.join(self._testtmp, b'.cache/lfs'))
1511 1512 )
1512 1513 hgrc.write(b'[web]\n')
1513 1514 hgrc.write(b'address = localhost\n')
1514 1515 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1515 1516 hgrc.write(b'server-header = testing stub value\n')
1516 1517
1517 1518 for opt in self._extraconfigopts:
1518 1519 section, key = _sys2bytes(opt).split(b'.', 1)
1519 1520 assert b'=' in key, (
1520 1521 'extra config opt %s must ' 'have an = for assignment' % opt
1521 1522 )
1522 1523 hgrc.write(b'[%s]\n%s\n' % (section, key))
1523 1524
1524 1525 def fail(self, msg):
1525 1526 # unittest differentiates between errored and failed.
1526 1527 # Failed is denoted by AssertionError (by default at least).
1527 1528 raise AssertionError(msg)
1528 1529
1529 1530 def _runcommand(self, cmd, env, normalizenewlines=False):
1530 1531 """Run command in a sub-process, capturing the output (stdout and
1531 1532 stderr).
1532 1533
1533 1534 Return a tuple (exitcode, output). output is None in debug mode.
1534 1535 """
1535 1536 if self._debug:
1536 1537 proc = subprocess.Popen(
1537 1538 _bytes2sys(cmd),
1538 1539 shell=True,
1539 1540 cwd=_bytes2sys(self._testtmp),
1540 1541 env=env,
1541 1542 )
1542 1543 ret = proc.wait()
1543 1544 return (ret, None)
1544 1545
1545 1546 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1546 1547
1547 1548 def cleanup():
1548 1549 terminate(proc)
1549 1550 ret = proc.wait()
1550 1551 if ret == 0:
1551 1552 ret = signal.SIGTERM << 8
1552 1553 killdaemons(env['DAEMON_PIDS'])
1553 1554 return ret
1554 1555
1555 1556 proc.tochild.close()
1556 1557
1557 1558 try:
1558 1559 output = proc.fromchild.read()
1559 1560 except KeyboardInterrupt:
1560 1561 vlog('# Handling keyboard interrupt')
1561 1562 cleanup()
1562 1563 raise
1563 1564
1564 1565 ret = proc.wait()
1565 1566 if wifexited(ret):
1566 1567 ret = os.WEXITSTATUS(ret)
1567 1568
1568 1569 if proc.timeout:
1569 1570 ret = 'timeout'
1570 1571
1571 1572 if ret:
1572 1573 killdaemons(env['DAEMON_PIDS'])
1573 1574
1574 1575 for s, r in self._getreplacements():
1575 1576 output = re.sub(s, r, output)
1576 1577
1577 1578 if normalizenewlines:
1578 1579 output = output.replace(b'\r\n', b'\n')
1579 1580
1580 1581 return ret, output.splitlines(True)
1581 1582
1582 1583
1583 1584 class PythonTest(Test):
1584 1585 """A Python-based test."""
1585 1586
1586 1587 @property
1587 1588 def refpath(self):
1588 1589 return os.path.join(self._testdir, b'%s.out' % self.bname)
1589 1590
1590 1591 def _run(self, env):
1591 1592 # Quote the python(3) executable for Windows
1592 1593 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1593 1594 vlog("# Running", cmd.decode("utf-8"))
1594 1595 normalizenewlines = os.name == 'nt'
1595 1596 result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
1596 1597 if self._aborted:
1597 1598 raise KeyboardInterrupt()
1598 1599
1599 1600 return result
1600 1601
1601 1602
1602 1603 # Some glob patterns apply only in some circumstances, so the script
1603 1604 # might want to remove (glob) annotations that otherwise should be
1604 1605 # retained.
1605 1606 checkcodeglobpats = [
1606 1607 # On Windows it looks like \ doesn't require a (glob), but we know
1607 1608 # better.
1608 1609 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1609 1610 re.compile(br'^moving \S+/.*[^)]$'),
1610 1611 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1611 1612 # Not all platforms have 127.0.0.1 as loopback (though most do),
1612 1613 # so we always glob that too.
1613 1614 re.compile(br'.*\$LOCALIP.*$'),
1614 1615 ]
1615 1616
1616 1617 bchr = chr
1617 1618 if PYTHON3:
1618 1619 bchr = lambda x: bytes([x])
1619 1620
1620 1621 WARN_UNDEFINED = 1
1621 1622 WARN_YES = 2
1622 1623 WARN_NO = 3
1623 1624
1624 1625 MARK_OPTIONAL = b" (?)\n"
1625 1626
1626 1627
1627 1628 def isoptional(line):
1628 1629 return line.endswith(MARK_OPTIONAL)
1629 1630
1630 1631
1631 1632 class TTest(Test):
1632 1633 """A "t test" is a test backed by a .t file."""
1633 1634
1634 1635 SKIPPED_PREFIX = b'skipped: '
1635 1636 FAILED_PREFIX = b'hghave check failed: '
1636 1637 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1637 1638
1638 1639 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1639 1640 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1640 1641 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1641 1642
1642 1643 def __init__(self, path, *args, **kwds):
1643 1644 # accept an extra "case" parameter
1644 1645 case = kwds.pop('case', [])
1645 1646 self._case = case
1646 1647 self._allcases = {x for y in parsettestcases(path) for x in y}
1647 1648 super(TTest, self).__init__(path, *args, **kwds)
1648 1649 if case:
1649 1650 casepath = b'#'.join(case)
1650 1651 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1651 1652 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1652 1653 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1653 1654 self._have = {}
1654 1655
1655 1656 @property
1656 1657 def refpath(self):
1657 1658 return os.path.join(self._testdir, self.bname)
1658 1659
1659 1660 def _run(self, env):
1660 1661 with open(self.path, 'rb') as f:
1661 1662 lines = f.readlines()
1662 1663
1663 1664 # .t file is both reference output and the test input, keep reference
1664 1665 # output updated with the test input. This avoids some race
1665 1666 # conditions where the reference output does not match the actual test.
1666 1667 if self._refout is not None:
1667 1668 self._refout = lines
1668 1669
1669 1670 salt, script, after, expected = self._parsetest(lines)
1670 1671
1671 1672 # Write out the generated script.
1672 1673 fname = b'%s.sh' % self._testtmp
1673 1674 with open(fname, 'wb') as f:
1674 1675 for l in script:
1675 1676 f.write(l)
1676 1677
1677 1678 cmd = b'%s "%s"' % (self._shell, fname)
1678 1679 vlog("# Running", cmd.decode("utf-8"))
1679 1680
1680 1681 exitcode, output = self._runcommand(cmd, env)
1681 1682
1682 1683 if self._aborted:
1683 1684 raise KeyboardInterrupt()
1684 1685
1685 1686 # Do not merge output if skipped. Return hghave message instead.
1686 1687 # Similarly, with --debug, output is None.
1687 1688 if exitcode == self.SKIPPED_STATUS or output is None:
1688 1689 return exitcode, output
1689 1690
1690 1691 return self._processoutput(exitcode, output, salt, after, expected)
1691 1692
1692 1693 def _hghave(self, reqs):
1693 1694 allreqs = b' '.join(reqs)
1694 1695
1695 1696 self._detectslow(reqs)
1696 1697
1697 1698 if allreqs in self._have:
1698 1699 return self._have.get(allreqs)
1699 1700
1700 1701 # TODO do something smarter when all other uses of hghave are gone.
1701 1702 runtestdir = osenvironb[b'RUNTESTDIR']
1702 1703 tdir = runtestdir.replace(b'\\', b'/')
1703 1704 proc = Popen4(
1704 1705 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1705 1706 self._testtmp,
1706 1707 0,
1707 1708 self._getenv(),
1708 1709 )
1709 1710 stdout, stderr = proc.communicate()
1710 1711 ret = proc.wait()
1711 1712 if wifexited(ret):
1712 1713 ret = os.WEXITSTATUS(ret)
1713 1714 if ret == 2:
1714 1715 print(stdout.decode('utf-8'))
1715 1716 sys.exit(1)
1716 1717
1717 1718 if ret != 0:
1718 1719 self._have[allreqs] = (False, stdout)
1719 1720 return False, stdout
1720 1721
1721 1722 self._have[allreqs] = (True, None)
1722 1723 return True, None
1723 1724
1724 1725 def _detectslow(self, reqs):
1725 1726 """update the timeout of slow test when appropriate"""
1726 1727 if b'slow' in reqs:
1727 1728 self._timeout = self._slowtimeout
1728 1729
1729 1730 def _iftest(self, args):
1730 1731 # implements "#if"
1731 1732 reqs = []
1732 1733 for arg in args:
1733 1734 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1734 1735 if arg[3:] in self._case:
1735 1736 return False
1736 1737 elif arg in self._allcases:
1737 1738 if arg not in self._case:
1738 1739 return False
1739 1740 else:
1740 1741 reqs.append(arg)
1741 1742 self._detectslow(reqs)
1742 1743 return self._hghave(reqs)[0]
1743 1744
1744 1745 def _parsetest(self, lines):
1745 1746 # We generate a shell script which outputs unique markers to line
1746 1747 # up script results with our source. These markers include the input
1747 1748 # line number and the last return code.
1748 1749 salt = b"SALT%d" % time.time()
1749 1750
1750 1751 def addsalt(line, inpython):
1751 1752 if inpython:
1752 1753 script.append(b'%s %d 0\n' % (salt, line))
1753 1754 else:
1754 1755 script.append(b'echo %s %d $?\n' % (salt, line))
1755 1756
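# For reference (illustrative values): for a test command on source line 10,
# addsalt() emits something like
#   echo SALT1638471234 10 $?
# immediately before the command itself, so each marker carries the line
# number of the next command and the exit status of the previous one.
# _processoutput() later splits the captured output on these markers to line
# results back up with the .t source.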
1756 1757 activetrace = []
1757 1758 session = str(uuid.uuid4())
1758 1759 if PYTHON3:
1759 1760 session = session.encode('ascii')
1760 1761 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1761 1762 'HGCATAPULTSERVERPIPE'
1762 1763 )
1763 1764
1764 1765 def toggletrace(cmd=None):
1765 1766 if not hgcatapult or hgcatapult == os.devnull:
1766 1767 return
1767 1768
1768 1769 if activetrace:
1769 1770 script.append(
1770 1771 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1771 1772 % (session, activetrace[0])
1772 1773 )
1773 1774 if cmd is None:
1774 1775 return
1775 1776
1776 1777 if isinstance(cmd, str):
1777 1778 quoted = shellquote(cmd.strip())
1778 1779 else:
1779 1780 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1780 1781 quoted = quoted.replace(b'\\', b'\\\\')
1781 1782 script.append(
1782 1783 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1783 1784 % (session, quoted)
1784 1785 )
1785 1786 activetrace[0:] = [quoted]
1786 1787
1787 1788 script = []
1788 1789
1789 1790 # After we run the shell script, we re-unify the script output
1790 1791 # with non-active parts of the source, with synchronization by our
1791 1792 # SALT line number markers. The after table contains the non-active
1792 1793 # components, ordered by line number.
1793 1794 after = {}
1794 1795
1795 1796 # Expected shell script output.
1796 1797 expected = {}
1797 1798
1798 1799 pos = prepos = -1
1799 1800
1800 1801 # True or False when in a true or false conditional section
1801 1802 skipping = None
1802 1803
1803 1804 # We keep track of whether or not we're in a Python block so we
1804 1805 # can generate the surrounding doctest magic.
1805 1806 inpython = False
1806 1807
1807 1808 if self._debug:
1808 1809 script.append(b'set -x\n')
1809 1810 if self._hgcommand != b'hg':
1810 1811 script.append(b'alias hg="%s"\n' % self._hgcommand)
1811 1812 if os.getenv('MSYSTEM'):
1812 1813 script.append(b'alias pwd="pwd -W"\n')
1813 1814
1814 1815 if hgcatapult and hgcatapult != os.devnull:
1815 1816 if PYTHON3:
1816 1817 hgcatapult = hgcatapult.encode('utf8')
1817 1818 cataname = self.name.encode('utf8')
1818 1819 else:
1819 1820 cataname = self.name
1820 1821
1821 1822 # Kludge: use a while loop to keep the pipe from getting
1822 1823 # closed by our echo commands. The still-running file gets
1823 1824 # reaped at the end of the script, which causes the while
1824 1825 # loop to exit and closes the pipe. Sigh.
1825 1826 script.append(
1826 1827 b'rtendtracing() {\n'
1827 1828 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1828 1829 b' rm -f "$TESTTMP/.still-running"\n'
1829 1830 b'}\n'
1830 1831 b'trap "rtendtracing" 0\n'
1831 1832 b'touch "$TESTTMP/.still-running"\n'
1832 1833 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1833 1834 b'> %(catapult)s &\n'
1834 1835 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1835 1836 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1836 1837 % {
1837 1838 b'name': cataname,
1838 1839 b'session': session,
1839 1840 b'catapult': hgcatapult,
1840 1841 }
1841 1842 )
1842 1843
1843 1844 if self._case:
1844 1845 casestr = b'#'.join(self._case)
1845 1846 if isinstance(casestr, str):
1846 1847 quoted = shellquote(casestr)
1847 1848 else:
1848 1849 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1849 1850 script.append(b'TESTCASE=%s\n' % quoted)
1850 1851 script.append(b'export TESTCASE\n')
1851 1852
1852 1853 n = 0
1853 1854 for n, l in enumerate(lines):
1854 1855 if not l.endswith(b'\n'):
1855 1856 l += b'\n'
1856 1857 if l.startswith(b'#require'):
1857 1858 lsplit = l.split()
1858 1859 if len(lsplit) < 2 or lsplit[0] != b'#require':
1859 1860 after.setdefault(pos, []).append(
1860 1861 b' !!! invalid #require\n'
1861 1862 )
1862 1863 if not skipping:
1863 1864 haveresult, message = self._hghave(lsplit[1:])
1864 1865 if not haveresult:
1865 1866 script = [b'echo "%s"\nexit 80\n' % message]
1866 1867 break
1867 1868 after.setdefault(pos, []).append(l)
1868 1869 elif l.startswith(b'#if'):
1869 1870 lsplit = l.split()
1870 1871 if len(lsplit) < 2 or lsplit[0] != b'#if':
1871 1872 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1872 1873 if skipping is not None:
1873 1874 after.setdefault(pos, []).append(b' !!! nested #if\n')
1874 1875 skipping = not self._iftest(lsplit[1:])
1875 1876 after.setdefault(pos, []).append(l)
1876 1877 elif l.startswith(b'#else'):
1877 1878 if skipping is None:
1878 1879 after.setdefault(pos, []).append(b' !!! missing #if\n')
1879 1880 skipping = not skipping
1880 1881 after.setdefault(pos, []).append(l)
1881 1882 elif l.startswith(b'#endif'):
1882 1883 if skipping is None:
1883 1884 after.setdefault(pos, []).append(b' !!! missing #if\n')
1884 1885 skipping = None
1885 1886 after.setdefault(pos, []).append(l)
1886 1887 elif skipping:
1887 1888 after.setdefault(pos, []).append(l)
1888 1889 elif l.startswith(b' >>> '): # python inlines
1889 1890 after.setdefault(pos, []).append(l)
1890 1891 prepos = pos
1891 1892 pos = n
1892 1893 if not inpython:
1893 1894 # We've just entered a Python block. Add the header.
1894 1895 inpython = True
1895 1896 addsalt(prepos, False) # Make sure we report the exit code.
1896 1897 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1897 1898 addsalt(n, True)
1898 1899 script.append(l[2:])
1899 1900 elif l.startswith(b' ... '): # python inlines
1900 1901 after.setdefault(prepos, []).append(l)
1901 1902 script.append(l[2:])
1902 1903 elif l.startswith(b' $ '): # commands
1903 1904 if inpython:
1904 1905 script.append(b'EOF\n')
1905 1906 inpython = False
1906 1907 after.setdefault(pos, []).append(l)
1907 1908 prepos = pos
1908 1909 pos = n
1909 1910 addsalt(n, False)
1910 1911 rawcmd = l[4:]
1911 1912 cmd = rawcmd.split()
1912 1913 toggletrace(rawcmd)
1913 1914 if len(cmd) == 2 and cmd[0] == b'cd':
1914 1915 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1915 1916 script.append(rawcmd)
1916 1917 elif l.startswith(b' > '): # continuations
1917 1918 after.setdefault(prepos, []).append(l)
1918 1919 script.append(l[4:])
1919 1920 elif l.startswith(b' '): # results
1920 1921 # Queue up a list of expected results.
1921 1922 expected.setdefault(pos, []).append(l[2:])
1922 1923 else:
1923 1924 if inpython:
1924 1925 script.append(b'EOF\n')
1925 1926 inpython = False
1926 1927 # Non-command/result. Queue up for merged output.
1927 1928 after.setdefault(pos, []).append(l)
1928 1929
1929 1930 if inpython:
1930 1931 script.append(b'EOF\n')
1931 1932 if skipping is not None:
1932 1933 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1933 1934 addsalt(n + 1, False)
1934 1935 # Need to end any current per-command trace
1935 1936 if activetrace:
1936 1937 toggletrace()
1937 1938 return salt, script, after, expected
1938 1939
1939 1940 def _processoutput(self, exitcode, output, salt, after, expected):
1940 1941 # Merge the script output back into a unified test.
1941 1942 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1942 1943 if exitcode != 0:
1943 1944 warnonly = WARN_NO
1944 1945
1945 1946 pos = -1
1946 1947 postout = []
1947 1948 for out_rawline in output:
1948 1949 out_line, cmd_line = out_rawline, None
1949 1950 if salt in out_rawline:
1950 1951 out_line, cmd_line = out_rawline.split(salt, 1)
1951 1952
1952 1953 pos, postout, warnonly = self._process_out_line(
1953 1954 out_line, pos, postout, expected, warnonly
1954 1955 )
1955 1956 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1956 1957
1957 1958 if pos in after:
1958 1959 postout += after.pop(pos)
1959 1960
1960 1961 if warnonly == WARN_YES:
1961 1962 exitcode = False # Set exitcode to warned.
1962 1963
1963 1964 return exitcode, postout
1964 1965
1965 1966 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1966 1967 while out_line:
1967 1968 if not out_line.endswith(b'\n'):
1968 1969 out_line += b' (no-eol)\n'
1969 1970
1970 1971 # Find the expected output at the current position.
1971 1972 els = [None]
1972 1973 if expected.get(pos, None):
1973 1974 els = expected[pos]
1974 1975
1975 1976 optional = []
1976 1977 for i, el in enumerate(els):
1977 1978 r = False
1978 1979 if el:
1979 1980 r, exact = self.linematch(el, out_line)
1980 1981 if isinstance(r, str):
1981 1982 if r == '-glob':
1982 1983 out_line = ''.join(el.rsplit(' (glob)', 1))
1983 1984 r = '' # Warn only this line.
1984 1985 elif r == "retry":
1985 1986 postout.append(b' ' + el)
1986 1987 else:
1987 1988 log('\ninfo, unknown linematch result: %r\n' % r)
1988 1989 r = False
1989 1990 if r:
1990 1991 els.pop(i)
1991 1992 break
1992 1993 if el:
1993 1994 if isoptional(el):
1994 1995 optional.append(i)
1995 1996 else:
1996 1997 m = optline.match(el)
1997 1998 if m:
1998 1999 conditions = [c for c in m.group(2).split(b' ')]
1999 2000
2000 2001 if not self._iftest(conditions):
2001 2002 optional.append(i)
2002 2003 if exact:
2003 2004 # Don't allow the line to be matched against a later
2004 2005 # line in the output
2005 2006 els.pop(i)
2006 2007 break
2007 2008
2008 2009 if r:
2009 2010 if r == "retry":
2010 2011 continue
2011 2012 # clean up any optional leftovers
2012 2013 for i in optional:
2013 2014 postout.append(b' ' + els[i])
2014 2015 for i in reversed(optional):
2015 2016 del els[i]
2016 2017 postout.append(b' ' + el)
2017 2018 else:
2018 2019 if self.NEEDESCAPE(out_line):
2019 2020 out_line = TTest._stringescape(
2020 2021 b'%s (esc)\n' % out_line.rstrip(b'\n')
2021 2022 )
2022 2023 postout.append(b' ' + out_line) # Let diff deal with it.
2023 2024 if r != '': # If line failed.
2024 2025 warnonly = WARN_NO
2025 2026 elif warnonly == WARN_UNDEFINED:
2026 2027 warnonly = WARN_YES
2027 2028 break
2028 2029 else:
2029 2030 # clean up any optional leftovers
2030 2031 while expected.get(pos, None):
2031 2032 el = expected[pos].pop(0)
2032 2033 if el:
2033 2034 if not isoptional(el):
2034 2035 m = optline.match(el)
2035 2036 if m:
2036 2037 conditions = [c for c in m.group(2).split(b' ')]
2037 2038
2038 2039 if self._iftest(conditions):
2039 2040 # Don't append as optional line
2040 2041 continue
2041 2042 else:
2042 2043 continue
2043 2044 postout.append(b' ' + el)
2044 2045 return pos, postout, warnonly
2045 2046
2046 2047 def _process_cmd_line(self, cmd_line, pos, postout, after):
2047 2048 """process a "command" part of a line from unified test output"""
2048 2049 if cmd_line:
2049 2050 # Add on last return code.
2050 2051 ret = int(cmd_line.split()[1])
2051 2052 if ret != 0:
2052 2053 postout.append(b' [%d]\n' % ret)
2053 2054 if pos in after:
2054 2055 # Merge in non-active test bits.
2055 2056 postout += after.pop(pos)
2056 2057 pos = int(cmd_line.split()[0])
2057 2058 return pos, postout
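    # With the salt already stripped, cmd_line looks like b' 12 0\n'
    # (illustrative values): the first field is the source line number of the
    # command whose output follows, the second is the exit status of the
    # command that just finished.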
2058 2059
2059 2060 @staticmethod
2060 2061 def rematch(el, l):
2061 2062 try:
2062 2063 # parse any flags at the beginning of the regex. Only 'i' is
2063 2064 # supported right now, but this should be easy to extend.
2064 2065 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2065 2066 flags = flags or b''
2066 2067 el = flags + b'(?:' + el + b')'
2067 2068 # use \Z to ensure that the regex matches to the end of the string
2068 2069 if os.name == 'nt':
2069 2070 return re.match(el + br'\r?\n\Z', l)
2070 2071 return re.match(el + br'\n\Z', l)
2071 2072 except re.error:
2072 2073 # el is an invalid regex
2073 2074 return False
2074 2075
2075 2076 @staticmethod
2076 2077 def globmatch(el, l):
2077 2078 # The only supported special characters are * and ? plus / which also
2078 2079 # matches \ on Windows. Escaping of these characters is supported.
2079 2080 if el + b'\n' == l:
2080 2081 if os.altsep:
2081 2082 # matching on "/" is not needed for this line
2082 2083 for pat in checkcodeglobpats:
2083 2084 if pat.match(el):
2084 2085 return True
2085 2086 return b'-glob'
2086 2087 return True
2087 2088 el = el.replace(b'$LOCALIP', b'*')
2088 2089 i, n = 0, len(el)
2089 2090 res = b''
2090 2091 while i < n:
2091 2092 c = el[i : i + 1]
2092 2093 i += 1
2093 2094 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2094 2095 res += el[i - 1 : i + 1]
2095 2096 i += 1
2096 2097 elif c == b'*':
2097 2098 res += b'.*'
2098 2099 elif c == b'?':
2099 2100 res += b'.'
2100 2101 elif c == b'/' and os.altsep:
2101 2102 res += b'[/\\\\]'
2102 2103 else:
2103 2104 res += re.escape(c)
2104 2105 return TTest.rematch(res, l)
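    # A few illustrative translations performed above: b'*' becomes b'.*',
    # b'?' becomes b'.', b'/' becomes b'[/\\\\]' when os.altsep is set (so a
    # forward slash also matches a Windows backslash), b'$LOCALIP' is widened
    # to b'*', and every other byte is passed through re.escape().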
2105 2106
2106 2107 def linematch(self, el, l):
2107 2108 if el == l: # perfect match (fast)
2108 2109 return True, True
2109 2110 retry = False
2110 2111 if isoptional(el):
2111 2112 retry = "retry"
2112 2113 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2113 2114 else:
2114 2115 m = optline.match(el)
2115 2116 if m:
2116 2117 conditions = [c for c in m.group(2).split(b' ')]
2117 2118
2118 2119 el = m.group(1) + b"\n"
2119 2120 if not self._iftest(conditions):
2120 2121 # listed feature missing, should not match
2121 2122 return "retry", False
2122 2123
2123 2124 if el.endswith(b" (esc)\n"):
2124 2125 if PYTHON3:
2125 2126 el = el[:-7].decode('unicode_escape') + '\n'
2126 2127 el = el.encode('latin-1')
2127 2128 else:
2128 2129 el = el[:-7].decode('string-escape') + '\n'
2129 2130 if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
2130 2131 return True, True
2131 2132 if el.endswith(b" (re)\n"):
2132 2133 return (TTest.rematch(el[:-6], l) or retry), False
2133 2134 if el.endswith(b" (glob)\n"):
2134 2135 # ignore '(glob)' added to l by 'replacements'
2135 2136 if l.endswith(b" (glob)\n"):
2136 2137 l = l[:-8] + b"\n"
2137 2138 return (TTest.globmatch(el[:-8], l) or retry), False
2138 2139 if os.altsep:
2139 2140 _l = l.replace(b'\\', b'/')
2140 2141 if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
2141 2142 return True, True
2142 2143 return retry, True
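    # linematch() therefore returns a (matched, exact) pair: `matched` is True
    # for a hard match, False for a miss, "retry" when an optional or
    # feature-conditional line does not apply, or the raw rematch()/globmatch()
    # result for (re)/(glob) lines. `exact` being True tells
    # _process_out_line() that this expected line lined up with the current
    # output line and must not be considered again for later output.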
2143 2144
2144 2145 @staticmethod
2145 2146 def parsehghaveoutput(lines):
2146 2147 """Parse hghave log lines.
2147 2148
2148 2149 Return tuple of lists (missing, failed):
2149 2150 * the missing/unknown features
2150 2151 * the features for which the existence check failed"""
2151 2152 missing = []
2152 2153 failed = []
2153 2154 for line in lines:
2154 2155 if line.startswith(TTest.SKIPPED_PREFIX):
2155 2156 line = line.splitlines()[0]
2156 2157 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2157 2158 elif line.startswith(TTest.FAILED_PREFIX):
2158 2159 line = line.splitlines()[0]
2159 2160 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2160 2161
2161 2162 return missing, failed
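    # For instance (illustrative wording), a log line starting with
    # b'skipped: ' ends up in `missing`, and one starting with
    # b'hghave check failed: ' ends up in `failed`, each with its prefix
    # stripped and converted to a native string for display.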
2162 2163
2163 2164 @staticmethod
2164 2165 def _escapef(m):
2165 2166 return TTest.ESCAPEMAP[m.group(0)]
2166 2167
2167 2168 @staticmethod
2168 2169 def _stringescape(s):
2169 2170 return TTest.ESCAPESUB(TTest._escapef, s)
2170 2171
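# Illustrative sketch (not from the original file): how the "(esc)" markup
# produced above round-trips. TTest._stringescape() rewrites non-printable
# bytes using ESCAPEMAP, and linematch() undoes it via unicode_escape /
# string-escape when comparing expected and actual output.
def _esc_roundtrip_example():
    raw = b'binary \x07 bell'
    escaped = TTest._stringescape(b'%s (esc)\n' % raw)
    return escaped  # -> b'binary \\x07 bell (esc)\n'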
2171 2172
2172 2173 iolock = threading.RLock()
2173 2174 firstlock = threading.RLock()
2174 2175 firsterror = False
2175 2176
2176 2177
2177 2178 class TestResult(unittest._TextTestResult):
2178 2179 """Holds results when executing via unittest."""
2179 2180
2180 2181 # Don't worry too much about accessing the non-public _TextTestResult.
2181 2182 # It is relatively common in Python testing tools.
2182 2183 def __init__(self, options, *args, **kwargs):
2183 2184 super(TestResult, self).__init__(*args, **kwargs)
2184 2185
2185 2186 self._options = options
2186 2187
2187 2188 # unittest.TestResult didn't have skipped until 2.7. We need to
2188 2189 # polyfill it.
2189 2190 self.skipped = []
2190 2191
2191 2192 # We have a custom "ignored" result that isn't present in any Python
2192 2193 # unittest implementation. It is very similar to skipped. It may make
2193 2194 # sense to map it into skip some day.
2194 2195 self.ignored = []
2195 2196
2196 2197 self.times = []
2197 2198 self._firststarttime = None
2198 2199 # Data stored for the benefit of generating xunit reports.
2199 2200 self.successes = []
2200 2201 self.faildata = {}
2201 2202
2202 2203 if options.color == 'auto':
2203 2204 self.color = pygmentspresent and self.stream.isatty()
2204 2205 elif options.color == 'never':
2205 2206 self.color = False
2206 2207 else: # 'always', for testing purposes
2207 2208 self.color = pygmentspresent
2208 2209
2209 2210 def onStart(self, test):
2210 2211 """Can be overriden by custom TestResult"""
2211 2212
2212 2213 def onEnd(self):
2213 2214 """Can be overriden by custom TestResult"""
2214 2215
2215 2216 def addFailure(self, test, reason):
2216 2217 self.failures.append((test, reason))
2217 2218
2218 2219 if self._options.first:
2219 2220 self.stop()
2220 2221 else:
2221 2222 with iolock:
2222 2223 if reason == "timed out":
2223 2224 self.stream.write('t')
2224 2225 else:
2225 2226 if not self._options.nodiff:
2226 2227 self.stream.write('\n')
2227 2228 # Exclude the '\n' from highlighting to lex correctly
2228 2229 formatted = 'ERROR: %s output changed\n' % test
2229 2230 self.stream.write(highlightmsg(formatted, self.color))
2230 2231 self.stream.write('!')
2231 2232
2232 2233 self.stream.flush()
2233 2234
2234 2235 def addSuccess(self, test):
2235 2236 with iolock:
2236 2237 super(TestResult, self).addSuccess(test)
2237 2238 self.successes.append(test)
2238 2239
2239 2240 def addError(self, test, err):
2240 2241 super(TestResult, self).addError(test, err)
2241 2242 if self._options.first:
2242 2243 self.stop()
2243 2244
2244 2245 # Polyfill.
2245 2246 def addSkip(self, test, reason):
2246 2247 self.skipped.append((test, reason))
2247 2248 with iolock:
2248 2249 if self.showAll:
2249 2250 self.stream.writeln('skipped %s' % reason)
2250 2251 else:
2251 2252 self.stream.write('s')
2252 2253 self.stream.flush()
2253 2254
2254 2255 def addIgnore(self, test, reason):
2255 2256 self.ignored.append((test, reason))
2256 2257 with iolock:
2257 2258 if self.showAll:
2258 2259 self.stream.writeln('ignored %s' % reason)
2259 2260 else:
2260 2261 if reason not in ('not retesting', "doesn't match keyword"):
2261 2262 self.stream.write('i')
2262 2263 else:
2263 2264 self.testsRun += 1
2264 2265 self.stream.flush()
2265 2266
2266 2267 def addOutputMismatch(self, test, ret, got, expected):
2267 2268 """Record a mismatch in test output for a particular test."""
2268 2269 if self.shouldStop or firsterror:
2269 2270 # Don't print; some other test case already failed and
2270 2271 # printed. We're just stale and probably failed due to our
2271 2272 # temp dir getting cleaned up.
2272 2273 return
2273 2274
2274 2275 accepted = False
2275 2276 lines = []
2276 2277
2277 2278 with iolock:
2278 2279 if self._options.nodiff:
2279 2280 pass
2280 2281 elif self._options.view:
2281 2282 v = self._options.view
2282 2283 subprocess.call(
2283 2284 r'"%s" "%s" "%s"'
2284 2285 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2285 2286 shell=True,
2286 2287 )
2287 2288 else:
2288 2289 servefail, lines = getdiff(
2289 2290 expected, got, test.refpath, test.errpath
2290 2291 )
2291 2292 self.stream.write('\n')
2292 2293 for line in lines:
2293 2294 line = highlightdiff(line, self.color)
2294 2295 if PYTHON3:
2295 2296 self.stream.flush()
2296 2297 self.stream.buffer.write(line)
2297 2298 self.stream.buffer.flush()
2298 2299 else:
2299 2300 self.stream.write(line)
2300 2301 self.stream.flush()
2301 2302
2302 2303 if servefail:
2303 2304 raise test.failureException(
2304 2305 'server failed to start (HGPORT=%s)' % test._startport
2305 2306 )
2306 2307
2307 2308 # handle interactive prompt without releasing iolock
2308 2309 if self._options.interactive:
2309 2310 if test.readrefout() != expected:
2310 2311 self.stream.write(
2311 2312 'Reference output has changed (run again to prompt '
2312 2313 'changes)'
2313 2314 )
2314 2315 else:
2315 2316 self.stream.write('Accept this change? [y/N] ')
2316 2317 self.stream.flush()
2317 2318 answer = sys.stdin.readline().strip()
2318 2319 if answer.lower() in ('y', 'yes'):
2319 2320 if test.path.endswith(b'.t'):
2320 2321 rename(test.errpath, test.path)
2321 2322 else:
2322 2323 rename(test.errpath, b'%s.out' % test.path)
2323 2324 accepted = True
2324 2325 if not accepted:
2325 2326 self.faildata[test.name] = b''.join(lines)
2326 2327
2327 2328 return accepted
2328 2329
2329 2330 def startTest(self, test):
2330 2331 super(TestResult, self).startTest(test)
2331 2332
2332 2333 # os.times() reports the user and system CPU time consumed by this
2333 2334 # process and its children, along with elapsed real time. Not all of
2334 2335 # those values are available on Windows (only on Unix-like systems),
2335 2336 # which is why we fall back to another function, time.time(), for the
2336 2337 # wall time calculations.
2337 2338 test.started_times = os.times()
2338 2339 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2339 2340 test.started_time = time.time()
2340 2341 if self._firststarttime is None: # thread racy but irrelevant
2341 2342 self._firststarttime = test.started_time
2342 2343
2343 2344 def stopTest(self, test, interrupted=False):
2344 2345 super(TestResult, self).stopTest(test)
2345 2346
2346 2347 test.stopped_times = os.times()
2347 2348 stopped_time = time.time()
2348 2349
2349 2350 starttime = test.started_times
2350 2351 endtime = test.stopped_times
2351 2352 origin = self._firststarttime
2352 2353 self.times.append(
2353 2354 (
2354 2355 test.name,
2355 2356 endtime[2] - starttime[2], # user space CPU time
2356 2357 endtime[3] - starttime[3], # sys space CPU time
2357 2358 stopped_time - test.started_time, # real time
2358 2359 test.started_time - origin, # start date in run context
2359 2360 stopped_time - origin, # end date in run context
2360 2361 )
2361 2362 )
2362 2363
2363 2364 if interrupted:
2364 2365 with iolock:
2365 2366 self.stream.writeln(
2366 2367 'INTERRUPTED: %s (after %d seconds)'
2367 2368 % (test.name, self.times[-1][3])
2368 2369 )
2369 2370
2370 2371
2371 2372 def getTestResult():
2372 2373 """
2373 2374 Returns the relevant test result
2374 2375 """
2375 2376 if "CUSTOM_TEST_RESULT" in os.environ:
2376 2377 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2377 2378 return testresultmodule.TestResult
2378 2379 else:
2379 2380 return TestResult
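# For example, CUSTOM_TEST_RESULT=mytestresult (a hypothetical module on
# sys.path) would make the runner use mytestresult.TestResult instead of the
# class above.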
2380 2381
2381 2382
2382 2383 class TestSuite(unittest.TestSuite):
2383 2384 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2384 2385
2385 2386 def __init__(
2386 2387 self,
2387 2388 testdir,
2388 2389 jobs=1,
2389 2390 whitelist=None,
2390 2391 blacklist=None,
2391 2392 keywords=None,
2392 2393 loop=False,
2393 2394 runs_per_test=1,
2394 2395 loadtest=None,
2395 2396 showchannels=False,
2396 2397 *args,
2397 2398 **kwargs
2398 2399 ):
2399 2400 """Create a new instance that can run tests with a configuration.
2400 2401
2401 2402 testdir specifies the directory where tests are executed from. This
2402 2403 is typically the ``tests`` directory from Mercurial's source
2403 2404 repository.
2404 2405
2405 2406 jobs specifies the number of jobs to run concurrently. Each test
2406 2407 executes on its own thread. Tests actually spawn new processes, so
2407 2408 state mutation should not be an issue.
2408 2409
2409 2410 If there is only one job, it will use the main thread.
2410 2411
2411 2412 whitelist and blacklist denote tests that have been whitelisted and
2412 2413 blacklisted, respectively. These arguments don't belong in TestSuite.
2413 2414 Instead, whitelist and blacklist should be handled by the thing that
2414 2415 populates the TestSuite with tests. They are present to preserve
2415 2416 backwards compatible behavior which reports skipped tests as part
2416 2417 of the results.
2417 2418
2418 2419 keywords denotes key words that will be used to filter which tests
2419 2420 to execute. This arguably belongs outside of TestSuite.
2420 2421
2421 2422 loop denotes whether to loop over tests forever.
2422 2423 """
2423 2424 super(TestSuite, self).__init__(*args, **kwargs)
2424 2425
2425 2426 self._jobs = jobs
2426 2427 self._whitelist = whitelist
2427 2428 self._blacklist = blacklist
2428 2429 self._keywords = keywords
2429 2430 self._loop = loop
2430 2431 self._runs_per_test = runs_per_test
2431 2432 self._loadtest = loadtest
2432 2433 self._showchannels = showchannels
2433 2434
2434 2435 def run(self, result):
2435 2436 # We have a number of filters that need to be applied. We do this
2436 2437 # here instead of inside Test because it makes the running logic for
2437 2438 # Test simpler.
2438 2439 tests = []
2439 2440 num_tests = [0]
2440 2441 for test in self._tests:
2441 2442
2442 2443 def get():
2443 2444 num_tests[0] += 1
2444 2445 if getattr(test, 'should_reload', False):
2445 2446 return self._loadtest(test, num_tests[0])
2446 2447 return test
2447 2448
2448 2449 if not os.path.exists(test.path):
2449 2450 result.addSkip(test, "Doesn't exist")
2450 2451 continue
2451 2452
2452 2453 is_whitelisted = self._whitelist and (
2453 2454 test.relpath in self._whitelist or test.bname in self._whitelist
2454 2455 )
2455 2456 if not is_whitelisted:
2456 2457 is_blacklisted = self._blacklist and (
2457 2458 test.relpath in self._blacklist
2458 2459 or test.bname in self._blacklist
2459 2460 )
2460 2461 if is_blacklisted:
2461 2462 result.addSkip(test, 'blacklisted')
2462 2463 continue
2463 2464 if self._keywords:
2464 2465 with open(test.path, 'rb') as f:
2465 2466 t = f.read().lower() + test.bname.lower()
2466 2467 ignored = False
2467 2468 for k in self._keywords.lower().split():
2468 2469 if k not in t:
2469 2470 result.addIgnore(test, "doesn't match keyword")
2470 2471 ignored = True
2471 2472 break
2472 2473
2473 2474 if ignored:
2474 2475 continue
2475 2476 for _ in xrange(self._runs_per_test):
2476 2477 tests.append(get())
2477 2478
2478 2479 runtests = list(tests)
2479 2480 done = queue.Queue()
2480 2481 running = 0
2481 2482
2482 2483 channels = [""] * self._jobs
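# With --showchannels, stat() below prints one character per job slot about
# once a second: the running test's name (prefixed with '=') is drained one
# character at a time, '.' marks a slot whose name has been fully printed but
# whose test is still running, and ' ' marks an idle slot.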
2483 2484
2484 2485 def job(test, result):
2485 2486 for n, v in enumerate(channels):
2486 2487 if not v:
2487 2488 channel = n
2488 2489 break
2489 2490 else:
2490 2491 raise ValueError('Could not find output channel')
2491 2492 channels[channel] = "=" + test.name[5:].split(".")[0]
2492 2493 try:
2493 2494 test(result)
2494 2495 done.put(None)
2495 2496 except KeyboardInterrupt:
2496 2497 pass
2497 2498 except: # re-raises
2498 2499 done.put(('!', test, 'run-test raised an error, see traceback'))
2499 2500 raise
2500 2501 finally:
2501 2502 try:
2502 2503 channels[channel] = ''
2503 2504 except IndexError:
2504 2505 pass
2505 2506
2506 2507 def stat():
2507 2508 count = 0
2508 2509 while channels:
2509 2510 d = '\n%03s ' % count
2510 2511 for n, v in enumerate(channels):
2511 2512 if v:
2512 2513 d += v[0]
2513 2514 channels[n] = v[1:] or '.'
2514 2515 else:
2515 2516 d += ' '
2516 2517 d += ' '
2517 2518 with iolock:
2518 2519 sys.stdout.write(d + ' ')
2519 2520 sys.stdout.flush()
2520 2521 for x in xrange(10):
2521 2522 if channels:
2522 2523 time.sleep(0.1)
2523 2524 count += 1
2524 2525
2525 2526 stoppedearly = False
2526 2527
2527 2528 if self._showchannels:
2528 2529 statthread = threading.Thread(target=stat, name="stat")
2529 2530 statthread.start()
2530 2531
2531 2532 try:
2532 2533 while tests or running:
2533 2534 if not done.empty() or running == self._jobs or not tests:
2534 2535 try:
2535 2536 done.get(True, 1)
2536 2537 running -= 1
2537 2538 if result and result.shouldStop:
2538 2539 stoppedearly = True
2539 2540 break
2540 2541 except queue.Empty:
2541 2542 continue
2542 2543 if tests and not running == self._jobs:
2543 2544 test = tests.pop(0)
2544 2545 if self._loop:
2545 2546 if getattr(test, 'should_reload', False):
2546 2547 num_tests[0] += 1
2547 2548 tests.append(self._loadtest(test, num_tests[0]))
2548 2549 else:
2549 2550 tests.append(test)
2550 2551 if self._jobs == 1:
2551 2552 job(test, result)
2552 2553 else:
2553 2554 t = threading.Thread(
2554 2555 target=job, name=test.name, args=(test, result)
2555 2556 )
2556 2557 t.start()
2557 2558 running += 1
2558 2559
2559 2560 # If we stop early we still need to wait on started tests to
2560 2561 # finish. Otherwise, there is a race between the test completing
2561 2562 # and the test's cleanup code running. This could result in the
2562 2563 # test reporting incorrect results.
2563 2564 if stoppedearly:
2564 2565 while running:
2565 2566 try:
2566 2567 done.get(True, 1)
2567 2568 running -= 1
2568 2569 except queue.Empty:
2569 2570 continue
2570 2571 except KeyboardInterrupt:
2571 2572 for test in runtests:
2572 2573 test.abort()
2573 2574
2574 2575 channels = []
2575 2576
2576 2577 return result
2577 2578
2578 2579
2579 2580 # Save the most recent 5 wall-clock runtimes of each test to a
2580 2581 # human-readable text file named .testtimes. Tests are sorted
2581 2582 # alphabetically, while times for each test are listed from oldest to
2582 2583 # newest.
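# An entry in .testtimes is simply "<test name> <oldest> ... <newest>", one
# test per line, for example (values illustrative):
#   test-commit.t 2.341 2.298 2.405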
2583 2584
2584 2585
2585 2586 def loadtimes(outputdir):
2586 2587 times = []
2587 2588 try:
2588 2589 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2589 2590 for line in fp:
2590 2591 m = re.match('(.*?) ([0-9. ]+)', line)
2591 2592 times.append(
2592 2593 (m.group(1), [float(t) for t in m.group(2).split()])
2593 2594 )
2594 2595 except IOError as err:
2595 2596 if err.errno != errno.ENOENT:
2596 2597 raise
2597 2598 return times
2598 2599
2599 2600
2600 2601 def savetimes(outputdir, result):
2601 2602 saved = dict(loadtimes(outputdir))
2602 2603 maxruns = 5
2603 2604 skipped = {str(t[0]) for t in result.skipped}
2604 2605 for tdata in result.times:
2605 2606 test, real = tdata[0], tdata[3]
2606 2607 if test not in skipped:
2607 2608 ts = saved.setdefault(test, [])
2608 2609 ts.append(real)
2609 2610 ts[:] = ts[-maxruns:]
2610 2611
2611 2612 fd, tmpname = tempfile.mkstemp(
2612 2613 prefix=b'.testtimes', dir=outputdir, text=True
2613 2614 )
2614 2615 with os.fdopen(fd, 'w') as fp:
2615 2616 for name, ts in sorted(saved.items()):
2616 2617 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2617 2618 timepath = os.path.join(outputdir, b'.testtimes')
2618 2619 try:
2619 2620 os.unlink(timepath)
2620 2621 except OSError:
2621 2622 pass
2622 2623 try:
2623 2624 os.rename(tmpname, timepath)
2624 2625 except OSError:
2625 2626 pass
2626 2627
2627 2628
2628 2629 class TextTestRunner(unittest.TextTestRunner):
2629 2630 """Custom unittest test runner that uses appropriate settings."""
2630 2631
2631 2632 def __init__(self, runner, *args, **kwargs):
2632 2633 super(TextTestRunner, self).__init__(*args, **kwargs)
2633 2634
2634 2635 self._runner = runner
2635 2636
2636 2637 self._result = getTestResult()(
2637 2638 self._runner.options, self.stream, self.descriptions, self.verbosity
2638 2639 )
2639 2640
2640 2641 def listtests(self, test):
2641 2642 test = sorted(test, key=lambda t: t.name)
2642 2643
2643 2644 self._result.onStart(test)
2644 2645
2645 2646 for t in test:
2646 2647 print(t.name)
2647 2648 self._result.addSuccess(t)
2648 2649
2649 2650 if self._runner.options.xunit:
2650 2651 with open(self._runner.options.xunit, "wb") as xuf:
2651 2652 self._writexunit(self._result, xuf)
2652 2653
2653 2654 if self._runner.options.json:
2654 2655 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2655 2656 with open(jsonpath, 'w') as fp:
2656 2657 self._writejson(self._result, fp)
2657 2658
2658 2659 return self._result
2659 2660
2660 2661 def run(self, test):
2661 2662 self._result.onStart(test)
2662 2663 test(self._result)
2663 2664
2664 2665 failed = len(self._result.failures)
2665 2666 skipped = len(self._result.skipped)
2666 2667 ignored = len(self._result.ignored)
2667 2668
2668 2669 with iolock:
2669 2670 self.stream.writeln('')
2670 2671
2671 2672 if not self._runner.options.noskips:
2672 2673 for test, msg in sorted(
2673 2674 self._result.skipped, key=lambda s: s[0].name
2674 2675 ):
2675 2676 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2676 2677 msg = highlightmsg(formatted, self._result.color)
2677 2678 self.stream.write(msg)
2678 2679 for test, msg in sorted(
2679 2680 self._result.failures, key=lambda f: f[0].name
2680 2681 ):
2681 2682 formatted = 'Failed %s: %s\n' % (test.name, msg)
2682 2683 self.stream.write(highlightmsg(formatted, self._result.color))
2683 2684 for test, msg in sorted(
2684 2685 self._result.errors, key=lambda e: e[0].name
2685 2686 ):
2686 2687 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2687 2688
2688 2689 if self._runner.options.xunit:
2689 2690 with open(self._runner.options.xunit, "wb") as xuf:
2690 2691 self._writexunit(self._result, xuf)
2691 2692
2692 2693 if self._runner.options.json:
2693 2694 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2694 2695 with open(jsonpath, 'w') as fp:
2695 2696 self._writejson(self._result, fp)
2696 2697
2697 2698 self._runner._checkhglib('Tested')
2698 2699
2699 2700 savetimes(self._runner._outputdir, self._result)
2700 2701
2701 2702 if failed and self._runner.options.known_good_rev:
2702 2703 self._bisecttests(t for t, m in self._result.failures)
2703 2704 self.stream.writeln(
2704 2705 '# Ran %d tests, %d skipped, %d failed.'
2705 2706 % (self._result.testsRun, skipped + ignored, failed)
2706 2707 )
2707 2708 if failed:
2708 2709 self.stream.writeln(
2709 2710 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2710 2711 )
2711 2712 if self._runner.options.time:
2712 2713 self.printtimes(self._result.times)
2713 2714
2714 2715 if self._runner.options.exceptions:
2715 2716 exceptions = aggregateexceptions(
2716 2717 os.path.join(self._runner._outputdir, b'exceptions')
2717 2718 )
2718 2719
2719 2720 self.stream.writeln('Exceptions Report:')
2720 2721 self.stream.writeln(
2721 2722 '%d total from %d frames'
2722 2723 % (exceptions['total'], len(exceptions['exceptioncounts']))
2723 2724 )
2724 2725 combined = exceptions['combined']
2725 2726 for key in sorted(combined, key=combined.get, reverse=True):
2726 2727 frame, line, exc = key
2727 2728 totalcount, testcount, leastcount, leasttest = combined[key]
2728 2729
2729 2730 self.stream.writeln(
2730 2731 '%d (%d tests)\t%s: %s (%s - %d total)'
2731 2732 % (
2732 2733 totalcount,
2733 2734 testcount,
2734 2735 frame,
2735 2736 exc,
2736 2737 leasttest,
2737 2738 leastcount,
2738 2739 )
2739 2740 )
2740 2741
2741 2742 self.stream.flush()
2742 2743
2743 2744 return self._result
2744 2745
2745 2746 def _bisecttests(self, tests):
2746 2747 bisectcmd = ['hg', 'bisect']
2747 2748 bisectrepo = self._runner.options.bisect_repo
2748 2749 if bisectrepo:
2749 2750 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2750 2751
2751 2752 def pread(args):
2752 2753 env = os.environ.copy()
2753 2754 env['HGPLAIN'] = '1'
2754 2755 p = subprocess.Popen(
2755 2756 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2756 2757 )
2757 2758 data = p.stdout.read()
2758 2759 p.wait()
2759 2760 return data
2760 2761
2761 2762 for test in tests:
2762 2763 pread(bisectcmd + ['--reset']),
2763 2764 pread(bisectcmd + ['--bad', '.'])
2764 2765 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2765 2766 # TODO: we probably need to forward more options
2766 2767 # that alter hg's behavior inside the tests.
2767 2768 opts = ''
2768 2769 withhg = self._runner.options.with_hg
2769 2770 if withhg:
2770 2771 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2771 2772 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2772 2773 data = pread(bisectcmd + ['--command', rtc])
2773 2774 m = re.search(
2774 2775 (
2775 2776 br'\nThe first (?P<goodbad>bad|good) revision '
2776 2777 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2777 2778 br'summary: +(?P<summary>[^\n]+)\n'
2778 2779 ),
2779 2780 data,
2780 2781 (re.MULTILINE | re.DOTALL),
2781 2782 )
2782 2783 if m is None:
2783 2784 self.stream.writeln(
2784 2785 'Failed to identify failure point for %s' % test
2785 2786 )
2786 2787 continue
2787 2788 dat = m.groupdict()
2788 2789 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2789 2790 self.stream.writeln(
2790 2791 '%s %s by %s (%s)'
2791 2792 % (
2792 2793 test,
2793 2794 verb,
2794 2795 dat['node'].decode('ascii'),
2795 2796 dat['summary'].decode('utf8', 'ignore'),
2796 2797 )
2797 2798 )
2798 2799
2799 2800 def printtimes(self, times):
2800 2801 # iolock held by run
2801 2802 self.stream.writeln('# Producing time report')
2802 2803 times.sort(key=lambda t: (t[3]))
2803 2804 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2804 2805 self.stream.writeln(
2805 2806 '%-7s %-7s %-7s %-7s %-7s %s'
2806 2807 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2807 2808 )
2808 2809 for tdata in times:
2809 2810 test = tdata[0]
2810 2811 cuser, csys, real, start, end = tdata[1:6]
2811 2812 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2812 2813
2813 2814 @staticmethod
2814 2815 def _writexunit(result, outf):
2815 2816 # See http://llg.cubic.org/docs/junit/ for a reference.
2816 2817 timesd = {t[0]: t[3] for t in result.times}
2817 2818 doc = minidom.Document()
2818 2819 s = doc.createElement('testsuite')
2819 2820 s.setAttribute('errors', "0") # TODO
2820 2821 s.setAttribute('failures', str(len(result.failures)))
2821 2822 s.setAttribute('name', 'run-tests')
2822 2823 s.setAttribute(
2823 2824 'skipped', str(len(result.skipped) + len(result.ignored))
2824 2825 )
2825 2826 s.setAttribute('tests', str(result.testsRun))
2826 2827 doc.appendChild(s)
2827 2828 for tc in result.successes:
2828 2829 t = doc.createElement('testcase')
2829 2830 t.setAttribute('name', tc.name)
2830 2831 tctime = timesd.get(tc.name)
2831 2832 if tctime is not None:
2832 2833 t.setAttribute('time', '%.3f' % tctime)
2833 2834 s.appendChild(t)
2834 2835 for tc, err in sorted(result.faildata.items()):
2835 2836 t = doc.createElement('testcase')
2836 2837 t.setAttribute('name', tc)
2837 2838 tctime = timesd.get(tc)
2838 2839 if tctime is not None:
2839 2840 t.setAttribute('time', '%.3f' % tctime)
2840 2841 # createCDATASection expects a unicode string or it will
2841 2842 # convert using default conversion rules, which will
2842 2843 # fail if the string isn't ASCII.
2843 2844 err = cdatasafe(err).decode('utf-8', 'replace')
2844 2845 cd = doc.createCDATASection(err)
2845 2846 # Use 'failure' here instead of 'error' to match errors = 0,
2846 2847 # failures = len(result.failures) in the testsuite element.
2847 2848 failelem = doc.createElement('failure')
2848 2849 failelem.setAttribute('message', 'output changed')
2849 2850 failelem.setAttribute('type', 'output-mismatch')
2850 2851 failelem.appendChild(cd)
2851 2852 t.appendChild(failelem)
2852 2853 s.appendChild(t)
2853 2854 for tc, message in result.skipped:
2854 2855 # According to the schema, 'skipped' has no attributes. So store
2855 2856 # the skip message as a text node instead.
2856 2857 t = doc.createElement('testcase')
2857 2858 t.setAttribute('name', tc.name)
2858 2859 binmessage = message.encode('utf-8')
2859 2860 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2860 2861 cd = doc.createCDATASection(message)
2861 2862 skipelem = doc.createElement('skipped')
2862 2863 skipelem.appendChild(cd)
2863 2864 t.appendChild(skipelem)
2864 2865 s.appendChild(t)
2865 2866 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2866 2867
2867 2868 @staticmethod
2868 2869 def _writejson(result, outf):
2869 2870 timesd = {}
2870 2871 for tdata in result.times:
2871 2872 test = tdata[0]
2872 2873 timesd[test] = tdata[1:]
2873 2874
2874 2875 outcome = {}
2875 2876 groups = [
2876 2877 ('success', ((tc, None) for tc in result.successes)),
2877 2878 ('failure', result.failures),
2878 2879 ('skip', result.skipped),
2879 2880 ]
2880 2881 for res, testcases in groups:
2881 2882 for tc, __ in testcases:
2882 2883 if tc.name in timesd:
2883 2884 diff = result.faildata.get(tc.name, b'')
2884 2885 try:
2885 2886 diff = diff.decode('unicode_escape')
2886 2887 except UnicodeDecodeError as e:
2887 2888 diff = '%r decoding diff, sorry' % e
2888 2889 tres = {
2889 2890 'result': res,
2890 2891 'time': ('%0.3f' % timesd[tc.name][2]),
2891 2892 'cuser': ('%0.3f' % timesd[tc.name][0]),
2892 2893 'csys': ('%0.3f' % timesd[tc.name][1]),
2893 2894 'start': ('%0.3f' % timesd[tc.name][3]),
2894 2895 'end': ('%0.3f' % timesd[tc.name][4]),
2895 2896 'diff': diff,
2896 2897 }
2897 2898 else:
2898 2899 # blacklisted test
2899 2900 tres = {'result': res}
2900 2901
2901 2902 outcome[tc.name] = tres
2902 2903 jsonout = json.dumps(
2903 2904 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2904 2905 )
2905 2906 outf.writelines(("testreport =", jsonout))
2906 2907
2907 2908
2908 2909 def sorttests(testdescs, previoustimes, shuffle=False):
2909 2910 """Do an in-place sort of tests."""
2910 2911 if shuffle:
2911 2912 random.shuffle(testdescs)
2912 2913 return
2913 2914
2914 2915 if previoustimes:
2915 2916
2916 2917 def sortkey(f):
2917 2918 f = f['path']
2918 2919 if f in previoustimes:
2919 2920 # Use most recent time as estimate
2920 2921 return -(previoustimes[f][-1])
2921 2922 else:
2922 2923 # Default to a rather arbitrary value of 1 second for new tests
2923 2924 return -1.0
2924 2925
2925 2926 else:
2926 2927 # keywords for slow tests
2927 2928 slow = {
2928 2929 b'svn': 10,
2929 2930 b'cvs': 10,
2930 2931 b'hghave': 10,
2931 2932 b'largefiles-update': 10,
2932 2933 b'run-tests': 10,
2933 2934 b'corruption': 10,
2934 2935 b'race': 10,
2935 2936 b'i18n': 10,
2936 2937 b'check': 100,
2937 2938 b'gendoc': 100,
2938 2939 b'contrib-perf': 200,
2939 2940 b'merge-combination': 100,
2940 2941 }
2941 2942 perf = {}
2942 2943
2943 2944 def sortkey(f):
2944 2945 # run largest tests first, as they tend to take the longest
2945 2946 f = f['path']
2946 2947 try:
2947 2948 return perf[f]
2948 2949 except KeyError:
2949 2950 try:
2950 2951 val = -os.stat(f).st_size
2951 2952 except OSError as e:
2952 2953 if e.errno != errno.ENOENT:
2953 2954 raise
2954 2955 perf[f] = -1e9 # file does not exist, tell early
2955 2956 return -1e9
2956 2957 for kw, mul in slow.items():
2957 2958 if kw in f:
2958 2959 val *= mul
2959 2960 if f.endswith(b'.py'):
2960 2961 val /= 10.0
2961 2962 perf[f] = val / 1000.0
2962 2963 return perf[f]
2963 2964
2964 2965 testdescs.sort(key=sortkey)
2965 2966
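# A minimal usage sketch for the sorting above (hypothetical data, not
# executed by the harness): with recorded runtimes, the slowest test is
# scheduled first so the longest work starts as early as possible.
def _sorttests_example():
    descs = [{'path': b'test-a.t'}, {'path': b'test-b.t'}]
    prev = {b'test-a.t': [1.2], b'test-b.t': [8.5]}
    sorttests(descs, prev)
    return descs  # test-b.t (the slower one) is now first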
2966 2967
2967 2968 class TestRunner(object):
2968 2969 """Holds context for executing tests.
2969 2970
2970 2971 Tests rely on a lot of state. This object holds it for them.
2971 2972 """
2972 2973
2973 2974 # Programs required to run tests.
2974 2975 REQUIREDTOOLS = [
2975 2976 b'diff',
2976 2977 b'grep',
2977 2978 b'unzip',
2978 2979 b'gunzip',
2979 2980 b'bunzip2',
2980 2981 b'sed',
2981 2982 ]
2982 2983
2983 2984 # Maps file extensions to test class.
2984 2985 TESTTYPES = [
2985 2986 (b'.py', PythonTest),
2986 2987 (b'.t', TTest),
2987 2988 ]
2988 2989
2989 2990 def __init__(self):
2990 2991 self.options = None
2991 2992 self._hgroot = None
2992 2993 self._testdir = None
2993 2994 self._outputdir = None
2994 2995 self._hgtmp = None
2995 2996 self._installdir = None
2996 2997 self._bindir = None
2997 2998 self._tmpbindir = None
2998 2999 self._pythondir = None
2999 3000 self._coveragefile = None
3000 3001 self._createdfiles = []
3001 3002 self._hgcommand = None
3002 3003 self._hgpath = None
3003 3004 self._portoffset = 0
3004 3005 self._ports = {}
3005 3006
3006 3007 def run(self, args, parser=None):
3007 3008 """Run the test suite."""
3008 3009 oldmask = os.umask(0o22)
3009 3010 try:
3010 3011 parser = parser or getparser()
3011 3012 options = parseargs(args, parser)
3012 3013 tests = [_sys2bytes(a) for a in options.tests]
3013 3014 if options.test_list is not None:
3014 3015 for listfile in options.test_list:
3015 3016 with open(listfile, 'rb') as f:
3016 3017 tests.extend(t for t in f.read().splitlines() if t)
3017 3018 self.options = options
3018 3019
3019 3020 self._checktools()
3020 3021 testdescs = self.findtests(tests)
3021 3022 if options.profile_runner:
3022 3023 import statprof
3023 3024
3024 3025 statprof.start()
3025 3026 result = self._run(testdescs)
3026 3027 if options.profile_runner:
3027 3028 statprof.stop()
3028 3029 statprof.display()
3029 3030 return result
3030 3031
3031 3032 finally:
3032 3033 os.umask(oldmask)
3033 3034
3034 3035 def _run(self, testdescs):
3035 3036 testdir = getcwdb()
3036 3037 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3037 3038 # assume all tests are in the same folder for now
3038 3039 if testdescs:
3039 3040 pathname = os.path.dirname(testdescs[0]['path'])
3040 3041 if pathname:
3041 3042 testdir = os.path.join(testdir, pathname)
3042 3043 self._testdir = osenvironb[b'TESTDIR'] = testdir
3043 3044 if self.options.outputdir:
3044 3045 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3045 3046 else:
3046 3047 self._outputdir = getcwdb()
3047 3048 if testdescs and pathname:
3048 3049 self._outputdir = os.path.join(self._outputdir, pathname)
3049 3050 previoustimes = {}
3050 3051 if self.options.order_by_runtime:
3051 3052 previoustimes = dict(loadtimes(self._outputdir))
3052 3053 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3053 3054
3054 3055 if 'PYTHONHASHSEED' not in os.environ:
3055 3056 # use a random python hash seed all the time
3056 3057 # we do the randomness ourselves so we know what seed is used
3057 3058 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3058 3059
3059 3060 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3060 3061 # by default, causing thrashing on high-cpu-count systems.
3061 3062 # Setting its limit to 3 during tests should still let us uncover
3062 3063 # multi-threading bugs while keeping the thrashing reasonable.
3063 3064 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3064 3065
3065 3066 if self.options.tmpdir:
3066 3067 self.options.keep_tmpdir = True
3067 3068 tmpdir = _sys2bytes(self.options.tmpdir)
3068 3069 if os.path.exists(tmpdir):
3069 3070 # Meaning of tmpdir has changed since 1.3: we used to create
3070 3071 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3071 3072 # tmpdir already exists.
3072 3073 print("error: temp dir %r already exists" % tmpdir)
3073 3074 return 1
3074 3075
3075 3076 os.makedirs(tmpdir)
3076 3077 else:
3077 3078 d = None
3078 3079 if os.name == 'nt':
3079 3080 # without this, we get the default temp dir location, but
3080 3081 # in all lowercase, which causes trouble with paths (issue3490)
3081 3082 d = osenvironb.get(b'TMP', None)
3082 3083 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3083 3084
3084 3085 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3085 3086
3086 3087 if self.options.with_hg:
3087 3088 self._installdir = None
3088 3089 whg = self.options.with_hg
3089 3090 self._bindir = os.path.dirname(os.path.realpath(whg))
3090 3091 assert isinstance(self._bindir, bytes)
3091 3092 self._hgcommand = os.path.basename(whg)
3092 3093 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3093 3094 os.makedirs(self._tmpbindir)
3094 3095
3095 3096 normbin = os.path.normpath(os.path.abspath(whg))
3096 3097 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3097 3098
3098 3099 # Other Python scripts in the test harness need to
3099 3100 # `import mercurial`. If `hg` is a Python script, we assume
3100 3101 # the Mercurial modules are relative to its path and tell the tests
3101 3102 # to load Python modules from its directory.
3102 3103 with open(whg, 'rb') as fh:
3103 3104 initial = fh.read(1024)
3104 3105
3105 3106 if re.match(b'#!.*python', initial):
3106 3107 self._pythondir = self._bindir
3107 3108 # If it looks like our in-repo Rust binary, use the source root.
3108 3109 # This is a bit hacky. But rhg is still not supported outside the
3109 3110 # source directory. So until it is, do the simple thing.
3110 3111 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3111 3112 self._pythondir = os.path.dirname(self._testdir)
3112 3113 # Fall back to the legacy behavior.
3113 3114 else:
3114 3115 self._pythondir = self._bindir
3115 3116
3116 3117 else:
3117 3118 self._installdir = os.path.join(self._hgtmp, b"install")
3118 3119 self._bindir = os.path.join(self._installdir, b"bin")
3119 3120 self._hgcommand = b'hg'
3120 3121 self._tmpbindir = self._bindir
3121 3122 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3122 3123
3123 3124 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3124 3125 # a python script and feed it to python.exe. Legacy stdio is forcibly
3125 3126 # enabled by hg.exe, and this is a more realistic way to launch hg
3126 3127 # anyway.
3127 3128 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3128 3129 self._hgcommand += b'.exe'
3129 3130
3130 3131 # set CHGHG, then replace "hg" command by "chg"
3131 3132 chgbindir = self._bindir
3132 3133 if self.options.chg or self.options.with_chg:
3133 3134 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3134 3135 else:
3135 3136 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3136 3137 if self.options.chg:
3137 3138 self._hgcommand = b'chg'
3138 3139 elif self.options.with_chg:
3139 3140 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3140 3141 self._hgcommand = os.path.basename(self.options.with_chg)
3141 3142
3142 3143 # configure fallback and replace "hg" command by "rhg"
3143 3144 rhgbindir = self._bindir
3144 3145 if self.options.rhg or self.options.with_rhg:
3145 3146 # Affects hghave.py
3146 3147 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3147 3148 # Affects configuration. Alternatives would be setting configuration through
3148 3149 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3149 3150 # `--config` but that disrupts tests that print command lines and check expected
3150 3151 # output.
3151 3152 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3152 3153 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3153 3154 self._bindir, self._hgcommand
3154 3155 )
3155 3156 if self.options.rhg:
3156 3157 self._hgcommand = b'rhg'
3157 3158 elif self.options.with_rhg:
3158 3159 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3159 3160 self._hgcommand = os.path.basename(self.options.with_rhg)
3160 3161
3161 3162 osenvironb[b"BINDIR"] = self._bindir
3162 3163 osenvironb[b"PYTHON"] = PYTHON
3163 3164
3164 3165 fileb = _sys2bytes(__file__)
3165 3166 runtestdir = os.path.abspath(os.path.dirname(fileb))
3166 3167 osenvironb[b'RUNTESTDIR'] = runtestdir
3167 3168 if PYTHON3:
3168 3169 sepb = _sys2bytes(os.pathsep)
3169 3170 else:
3170 3171 sepb = os.pathsep
3171 3172 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3172 3173 if os.path.islink(__file__):
3173 3174 # test helper will likely be at the end of the symlink
3174 3175 realfile = os.path.realpath(fileb)
3175 3176 realdir = os.path.abspath(os.path.dirname(realfile))
3176 3177 path.insert(2, realdir)
3177 3178 if chgbindir != self._bindir:
3178 3179 path.insert(1, chgbindir)
3179 3180 if rhgbindir != self._bindir:
3180 3181 path.insert(1, rhgbindir)
3181 3182 if self._testdir != runtestdir:
3182 3183 path = [self._testdir] + path
3183 3184 if self._tmpbindir != self._bindir:
3184 3185 path = [self._tmpbindir] + path
3185 3186 osenvironb[b"PATH"] = sepb.join(path)
3186 3187
3187 3188 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3188 3189 # can run .../tests/run-tests.py test-foo where test-foo
3189 3190 # adds an extension to HGRC. Also include run-tests.py's directory to
3190 3191 # import modules like heredoctest.
3191 3192 pypath = [self._pythondir, self._testdir, runtestdir]
3192 3193 # We have to augment PYTHONPATH, rather than simply replacing
3193 3194 # it, in case external libraries are only available via current
3194 3195 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3195 3196 # are in /opt/subversion.)
3196 3197 oldpypath = osenvironb.get(IMPL_PATH)
3197 3198 if oldpypath:
3198 3199 pypath.append(oldpypath)
3199 3200 osenvironb[IMPL_PATH] = sepb.join(pypath)
3200 3201
3201 3202 if self.options.pure:
3202 3203 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3203 3204 os.environ["HGMODULEPOLICY"] = "py"
3204 3205 if self.options.rust:
3205 3206 os.environ["HGMODULEPOLICY"] = "rust+c"
3206 3207 if self.options.no_rust:
3207 3208 current_policy = os.environ.get("HGMODULEPOLICY", "")
3208 3209 if current_policy.startswith("rust+"):
3209 3210 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3210 3211 os.environ.pop("HGWITHRUSTEXT", None)
3211 3212
3212 3213 if self.options.allow_slow_tests:
3213 3214 os.environ["HGTEST_SLOW"] = "slow"
3214 3215 elif 'HGTEST_SLOW' in os.environ:
3215 3216 del os.environ['HGTEST_SLOW']
3216 3217
3217 3218 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3218 3219
3219 3220 if self.options.exceptions:
3220 3221 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3221 3222 try:
3222 3223 os.makedirs(exceptionsdir)
3223 3224 except OSError as e:
3224 3225 if e.errno != errno.EEXIST:
3225 3226 raise
3226 3227
3227 3228 # Remove all existing exception reports.
3228 3229 for f in os.listdir(exceptionsdir):
3229 3230 os.unlink(os.path.join(exceptionsdir, f))
3230 3231
3231 3232 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3232 3233 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3233 3234 self.options.extra_config_opt.append(
3234 3235 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3235 3236 )
3236 3237
3237 3238 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3238 3239 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3239 3240 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3240 3241 vlog("# Using PATH", os.environ["PATH"])
3241 3242 vlog(
3242 3243 "# Using",
3243 3244 _bytes2sys(IMPL_PATH),
3244 3245 _bytes2sys(osenvironb[IMPL_PATH]),
3245 3246 )
3246 3247 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3247 3248
3248 3249 try:
3249 3250 return self._runtests(testdescs) or 0
3250 3251 finally:
3251 3252 time.sleep(0.1)
3252 3253 self._cleanup()
3253 3254
3254 3255 def findtests(self, args):
3255 3256 """Finds possible test files from arguments.
3256 3257
3257 3258 If you wish to inject custom tests into the test harness, this would
3258 3259 be a good function to monkeypatch or override in a derived class.
3259 3260 """
3260 3261 if not args:
3261 3262 if self.options.changed:
3262 3263 proc = Popen4(
3263 3264 b'hg st --rev "%s" -man0 .'
3264 3265 % _sys2bytes(self.options.changed),
3265 3266 None,
3266 3267 0,
3267 3268 )
3268 3269 stdout, stderr = proc.communicate()
3269 3270 args = stdout.strip(b'\0').split(b'\0')
3270 3271 else:
3271 3272 args = os.listdir(b'.')
3272 3273
3273 3274 expanded_args = []
3274 3275 for arg in args:
3275 3276 if os.path.isdir(arg):
3276 3277 if not arg.endswith(b'/'):
3277 3278 arg += b'/'
3278 3279 expanded_args.extend([arg + a for a in os.listdir(arg)])
3279 3280 else:
3280 3281 expanded_args.append(arg)
3281 3282 args = expanded_args
3282 3283
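# Recognize test names carrying case suffixes, e.g.
# b'test-foo.t#case1#case2', where the '#'-separated tail selects cases.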
3283 3284 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3284 3285 tests = []
3285 3286 for t in args:
3286 3287 case = []
3287 3288
3288 3289 if not (
3289 3290 os.path.basename(t).startswith(b'test-')
3290 3291 and (t.endswith(b'.py') or t.endswith(b'.t'))
3291 3292 ):
3292 3293
3293 3294 m = testcasepattern.match(os.path.basename(t))
3294 3295 if m is not None:
3295 3296 t_basename, casestr = m.groups()
3296 3297 t = os.path.join(os.path.dirname(t), t_basename)
3297 3298 if casestr:
3298 3299 case = casestr.split(b'#')
3299 3300 else:
3300 3301 continue
3301 3302
3302 3303 if t.endswith(b'.t'):
3303 3304 # .t file may contain multiple test cases
3304 3305 casedimensions = parsettestcases(t)
3305 3306 if casedimensions:
3306 3307 cases = []
3307 3308
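# Expand the cartesian product of all case dimensions, e.g. the
# dimensions [[a, b], [c, d]] yield the cases a#c, a#d, b#c and b#d.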
3308 3309 def addcases(case, casedimensions):
3309 3310 if not casedimensions:
3310 3311 cases.append(case)
3311 3312 else:
3312 3313 for c in casedimensions[0]:
3313 3314 addcases(case + [c], casedimensions[1:])
3314 3315
3315 3316 addcases([], casedimensions)
3316 3317 if case and case in cases:
3317 3318 cases = [case]
3318 3319 elif case:
3319 3320 # Ignore invalid cases
3320 3321 cases = []
3321 3322 else:
3322 3323 pass
3323 3324 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3324 3325 else:
3325 3326 tests.append({'path': t})
3326 3327 else:
3327 3328 tests.append({'path': t})
3328 3329
3329 3330 if self.options.retest:
3330 3331 retest_args = []
3331 3332 for test in tests:
3332 3333 errpath = self._geterrpath(test)
3333 3334 if os.path.exists(errpath):
3334 3335 retest_args.append(test)
3335 3336 tests = retest_args
3336 3337 return tests
3337 3338
3338 3339 def _runtests(self, testdescs):
3339 3340 def _reloadtest(test, i):
3340 3341 # convert a test back to its description dict
3341 3342 desc = {'path': test.path}
3342 3343 case = getattr(test, '_case', [])
3343 3344 if case:
3344 3345 desc['case'] = case
3345 3346 return self._gettest(desc, i)
3346 3347
3347 3348 try:
3348 3349 if self.options.restart:
3349 3350 orig = list(testdescs)
3350 3351 while testdescs:
3351 3352 desc = testdescs[0]
3352 3353 errpath = self._geterrpath(desc)
3353 3354 if os.path.exists(errpath):
3354 3355 break
3355 3356 testdescs.pop(0)
3356 3357 if not testdescs:
3357 3358 print("running all tests")
3358 3359 testdescs = orig
3359 3360
3360 3361 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3361 3362 num_tests = len(tests) * self.options.runs_per_test
3362 3363
3363 3364 jobs = min(num_tests, self.options.jobs)
3364 3365
3365 3366 failed = False
3366 3367 kws = self.options.keywords
3367 3368 if kws is not None and PYTHON3:
3368 3369 kws = kws.encode('utf-8')
3369 3370
3370 3371 suite = TestSuite(
3371 3372 self._testdir,
3372 3373 jobs=jobs,
3373 3374 whitelist=self.options.whitelisted,
3374 3375 blacklist=self.options.blacklist,
3375 3376 keywords=kws,
3376 3377 loop=self.options.loop,
3377 3378 runs_per_test=self.options.runs_per_test,
3378 3379 showchannels=self.options.showchannels,
3379 3380 tests=tests,
3380 3381 loadtest=_reloadtest,
3381 3382 )
3382 3383 verbosity = 1
3383 3384 if self.options.list_tests:
3384 3385 verbosity = 0
3385 3386 elif self.options.verbose:
3386 3387 verbosity = 2
3387 3388 runner = TextTestRunner(self, verbosity=verbosity)
3388 3389
3389 3390 if self.options.list_tests:
3390 3391 result = runner.listtests(suite)
3391 3392 else:
3392 3393 if self._installdir:
3393 3394 self._installhg()
3394 3395 self._checkhglib("Testing")
3395 3396 else:
3396 3397 self._usecorrectpython()
3397 3398 if self.options.chg:
3398 3399 assert self._installdir
3399 3400 self._installchg()
3400 3401 if self.options.rhg:
3401 3402 assert self._installdir
3402 3403 self._installrhg()
3403 3404
3404 3405 log(
3405 3406 'running %d tests using %d parallel processes'
3406 3407 % (num_tests, jobs)
3407 3408 )
3408 3409
3409 3410 result = runner.run(suite)
3410 3411
3411 3412 if result.failures or result.errors:
3412 3413 failed = True
3413 3414
3414 3415 result.onEnd()
3415 3416
3416 3417 if self.options.anycoverage:
3417 3418 self._outputcoverage()
3418 3419 except KeyboardInterrupt:
3419 3420 failed = True
3420 3421 print("\ninterrupted!")
3421 3422
3422 3423 if failed:
3423 3424 return 1
3424 3425
3425 3426 def _geterrpath(self, test):
3426 3427 # test['path'] is a relative path
3427 3428 if 'case' in test:
3428 3429 # for multi-dimensional test cases
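# e.g. test-cases.t with case [b'b', b'c'] -> test-cases.t#b#c.err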
3429 3430 casestr = b'#'.join(test['case'])
3430 3431 errpath = b'%s#%s.err' % (test['path'], casestr)
3431 3432 else:
3432 3433 errpath = b'%s.err' % test['path']
3433 3434 if self.options.outputdir:
3434 3435 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3435 3436 errpath = os.path.join(self._outputdir, errpath)
3436 3437 return errpath
3437 3438
3438 3439 def _getport(self, count):
3439 3440 port = self._ports.get(count) # do we have a cached entry?
3440 3441 if port is None:
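# each test is handed a block of consecutive ports, exposed to it
# as HGPORT, HGPORT1 and HGPORT2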
3441 3442 portneeded = 3
3442 3443 # after 100 tries we just give up and let the test report the failure
3443 3444 for tries in xrange(100):
3444 3445 allfree = True
3445 3446 port = self.options.port + self._portoffset
3446 3447 for idx in xrange(portneeded):
3447 3448 if not checkportisavailable(port + idx):
3448 3449 allfree = False
3449 3450 break
3450 3451 self._portoffset += portneeded
3451 3452 if allfree:
3452 3453 break
3453 3454 self._ports[count] = port
3454 3455 return port
3455 3456
3456 3457 def _gettest(self, testdesc, count):
3457 3458 """Obtain a Test by looking at its filename.
3458 3459
3459 3460 Returns a Test instance. The Test may not be runnable if it doesn't
3460 3461 map to a known type.
3461 3462 """
3462 3463 path = testdesc['path']
3463 3464 lctest = path.lower()
3464 3465 testcls = Test
3465 3466
3466 3467 for ext, cls in self.TESTTYPES:
3467 3468 if lctest.endswith(ext):
3468 3469 testcls = cls
3469 3470 break
3470 3471
3471 3472 refpath = os.path.join(getcwdb(), path)
3472 3473 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3473 3474
3474 3475 # extra keyword parameters. 'case' is used by .t tests
3475 3476 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3476 3477
3477 3478 t = testcls(
3478 3479 refpath,
3479 3480 self._outputdir,
3480 3481 tmpdir,
3481 3482 keeptmpdir=self.options.keep_tmpdir,
3482 3483 debug=self.options.debug,
3483 3484 first=self.options.first,
3484 3485 timeout=self.options.timeout,
3485 3486 startport=self._getport(count),
3486 3487 extraconfigopts=self.options.extra_config_opt,
3487 3488 shell=self.options.shell,
3488 3489 hgcommand=self._hgcommand,
3489 3490 usechg=bool(self.options.with_chg or self.options.chg),
3490 3491 chgdebug=self.options.chg_debug,
3491 3492 useipv6=useipv6,
3492 3493 **kwds
3493 3494 )
3494 3495 t.should_reload = True
3495 3496 return t
3496 3497
3497 3498 def _cleanup(self):
3498 3499 """Clean up state from this test invocation."""
3499 3500 if self.options.keep_tmpdir:
3500 3501 return
3501 3502
3502 3503 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3503 3504 shutil.rmtree(self._hgtmp, True)
3504 3505 for f in self._createdfiles:
3505 3506 try:
3506 3507 os.remove(f)
3507 3508 except OSError:
3508 3509 pass
3509 3510
3510 3511 def _usecorrectpython(self):
3511 3512 """Configure the environment to use the appropriate Python in tests."""
3512 3513 # Tests must use the same interpreter as us or bad things will happen.
3513 3514 pyexename = sys.platform == 'win32' and b'python.exe' or b'python3'
3514 3515
3515 3516 # os.symlink() is a thing with py3 on Windows, but it requires
3516 3517 # Administrator rights.
3517 3518 if getattr(os, 'symlink', None) and os.name != 'nt':
3518 3519 vlog(
3519 3520 "# Making python executable in test path a symlink to '%s'"
3520 3521 % sysexecutable
3521 3522 )
3522 3523 mypython = os.path.join(self._tmpbindir, pyexename)
3523 3524 try:
3524 3525 if os.readlink(mypython) == sysexecutable:
3525 3526 return
3526 3527 os.unlink(mypython)
3527 3528 except OSError as err:
3528 3529 if err.errno != errno.ENOENT:
3529 3530 raise
3530 3531 if self._findprogram(pyexename) != sysexecutable:
3531 3532 try:
3532 3533 os.symlink(sysexecutable, mypython)
3533 3534 self._createdfiles.append(mypython)
3534 3535 except OSError as err:
3535 3536 # child processes may race, which is harmless
3536 3537 if err.errno != errno.EEXIST:
3537 3538 raise
3538 3539 else:
3539 3540 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3540 3541 # reparse point with that name provided by Microsoft. Create a
3541 3542 # simple script on PATH with that name that delegates to the py3
3542 3543 # launcher so the shebang lines work.
3543 3544 if os.getenv('MSYSTEM'):
3544 3545 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3545 3546 f.write(b'#!/bin/sh\n')
3546 3547 f.write(b'py -3 "$@"\n')
3547 3548
3548 3549 exedir, exename = os.path.split(sysexecutable)
3549 3550 vlog(
3550 3551 "# Modifying search path to find %s as %s in '%s'"
3551 3552 % (exename, pyexename, exedir)
3552 3553 )
3553 3554 path = os.environ['PATH'].split(os.pathsep)
3554 3555 while exedir in path:
3555 3556 path.remove(exedir)
3556 3557
3557 3558 # Binaries installed by pip into the user area like pylint.exe may
3558 3559 # not be in PATH by default.
3559 3560 extra_paths = [exedir]
3560 3561 vi = sys.version_info
3561 3562 if 'APPDATA' in os.environ:
3562 3563 scripts_dir = os.path.join(
3563 3564 os.environ['APPDATA'],
3564 3565 'Python',
3565 3566 'Python%d%d' % (vi[0], vi[1]),
3566 3567 'Scripts',
3567 3568 )
3568 3569
3569 3570 if vi.major == 2:
3570 3571 scripts_dir = os.path.join(
3571 3572 os.environ['APPDATA'],
3572 3573 'Python',
3573 3574 'Scripts',
3574 3575 )
3575 3576
3576 3577 extra_paths.append(scripts_dir)
3577 3578
3578 3579 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3579 3580 if not self._findprogram(pyexename):
3580 3581 print("WARNING: Cannot find %s in search path" % pyexename)
3581 3582
3582 3583 def _installhg(self):
3583 3584 """Install hg into the test environment.
3584 3585
3585 3586 This will also configure hg with the appropriate testing settings.
3586 3587 """
3587 3588 vlog("# Performing temporary installation of HG")
3588 3589 installerrs = os.path.join(self._hgtmp, b"install.err")
3589 3590 compiler = ''
3590 3591 if self.options.compiler:
3591 3592 compiler = '--compiler ' + self.options.compiler
3592 3593 setup_opts = b""
3593 3594 if self.options.pure:
3594 3595 setup_opts = b"--pure"
3595 3596 elif self.options.rust:
3596 3597 setup_opts = b"--rust"
3597 3598 elif self.options.no_rust:
3598 3599 setup_opts = b"--no-rust"
3599 3600
3600 3601 # Run installer in hg root
3601 3602 script = os.path.realpath(sys.argv[0])
3602 3603 exe = sysexecutable
3603 3604 if PYTHON3:
3604 3605 compiler = _sys2bytes(compiler)
3605 3606 script = _sys2bytes(script)
3606 3607 exe = _sys2bytes(exe)
3607 3608 hgroot = os.path.dirname(os.path.dirname(script))
3608 3609 self._hgroot = hgroot
3609 3610 os.chdir(hgroot)
3610 3611 nohome = b'--home=""'
3611 3612 if os.name == 'nt':
3612 3613 # The --home="" trick works only on OSes where os.sep == '/'
3613 3614 # because of a distutils convert_path() fast-path. Avoid it at
3614 3615 # least on Windows for now, deal with .pydistutils.cfg bugs
3615 3616 # when they happen.
3616 3617 nohome = b''
3617 3618 cmd = (
3618 3619 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3619 3620 b' build %(compiler)s --build-base="%(base)s"'
3620 3621 b' install --force --prefix="%(prefix)s"'
3621 3622 b' --install-lib="%(libdir)s"'
3622 3623 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3623 3624 % {
3624 3625 b'exe': exe,
3625 3626 b'setup_opts': setup_opts,
3626 3627 b'compiler': compiler,
3627 3628 b'base': os.path.join(self._hgtmp, b"build"),
3628 3629 b'prefix': self._installdir,
3629 3630 b'libdir': self._pythondir,
3630 3631 b'bindir': self._bindir,
3631 3632 b'nohome': nohome,
3632 3633 b'logfile': installerrs,
3633 3634 }
3634 3635 )
3635 3636
3636 3637 # setuptools requires install directories to exist.
3637 3638 def makedirs(p):
3638 3639 try:
3639 3640 os.makedirs(p)
3640 3641 except OSError as e:
3641 3642 if e.errno != errno.EEXIST:
3642 3643 raise
3643 3644
3644 3645 makedirs(self._pythondir)
3645 3646 makedirs(self._bindir)
3646 3647
3647 3648 vlog("# Running", cmd.decode("utf-8"))
3648 3649 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3649 3650 if not self.options.verbose:
3650 3651 try:
3651 3652 os.remove(installerrs)
3652 3653 except OSError as e:
3653 3654 if e.errno != errno.ENOENT:
3654 3655 raise
3655 3656 else:
3656 3657 with open(installerrs, 'rb') as f:
3657 3658 for line in f:
3658 3659 if PYTHON3:
3659 3660 sys.stdout.buffer.write(line)
3660 3661 else:
3661 3662 sys.stdout.write(line)
3662 3663 sys.exit(1)
3663 3664 os.chdir(self._testdir)
3664 3665
3665 3666 self._usecorrectpython()
3666 3667
3667 3668 hgbat = os.path.join(self._bindir, b'hg.bat')
3668 3669 if os.path.isfile(hgbat):
3669 3670 # hg.bat expects to be put in bin/scripts while run-tests.py
3670 3671 # installation layout puts it in bin/ directly. Fix it
3671 3672 with open(hgbat, 'rb') as f:
3672 3673 data = f.read()
3673 3674 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3674 3675 data = data.replace(
3675 3676 br'"%~dp0..\python" "%~dp0hg" %*',
3676 3677 b'"%~dp0python" "%~dp0hg" %*',
3677 3678 )
3678 3679 with open(hgbat, 'wb') as f:
3679 3680 f.write(data)
3680 3681 else:
3681 3682 print('WARNING: cannot fix hg.bat reference to python.exe')
3682 3683
3683 3684 if self.options.anycoverage:
3684 3685 custom = os.path.join(
3685 3686 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3686 3687 )
3687 3688 target = os.path.join(self._pythondir, b'sitecustomize.py')
3688 3689 vlog('# Installing coverage trigger to %s' % target)
3689 3690 shutil.copyfile(custom, target)
3690 3691 rc = os.path.join(self._testdir, b'.coveragerc')
3691 3692 vlog('# Installing coverage rc to %s' % rc)
3692 3693 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3693 3694 covdir = os.path.join(self._installdir, b'..', b'coverage')
3694 3695 try:
3695 3696 os.mkdir(covdir)
3696 3697 except OSError as e:
3697 3698 if e.errno != errno.EEXIST:
3698 3699 raise
3699 3700
3700 3701 osenvironb[b'COVERAGE_DIR'] = covdir
3701 3702
3702 3703 def _checkhglib(self, verb):
3703 3704 """Ensure that the 'mercurial' package imported by python is
3704 3705 the one we expect it to be. If not, print a warning to stderr."""
3705 3706 if (self._bindir == self._pythondir) and (
3706 3707 self._bindir != self._tmpbindir
3707 3708 ):
3708 3709 # The pythondir has been inferred from --with-hg flag.
3709 3710 # We cannot expect anything sensible here.
3710 3711 return
3711 3712 expecthg = os.path.join(self._pythondir, b'mercurial')
3712 3713 actualhg = self._gethgpath()
3713 3714 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3714 3715 sys.stderr.write(
3715 3716 'warning: %s with unexpected mercurial lib: %s\n'
3716 3717 ' (expected %s)\n' % (verb, actualhg, expecthg)
3717 3718 )
3718 3719
3719 3720 def _gethgpath(self):
3720 3721 """Return the path to the mercurial package that is actually found by
3721 3722 the current Python interpreter."""
3722 3723 if self._hgpath is not None:
3723 3724 return self._hgpath
3724 3725
3725 3726 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3726 3727 cmd = cmd % PYTHON
3727 3728 if PYTHON3:
3728 3729 cmd = _bytes2sys(cmd)
3729 3730
3730 3731 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3731 3732 out, err = p.communicate()
3732 3733
3733 3734 self._hgpath = out.strip()
3734 3735
3735 3736 return self._hgpath
3736 3737
3737 3738 def _installchg(self):
3738 3739 """Install chg into the test environment"""
3739 3740 vlog('# Performing temporary installation of CHG')
3740 3741 assert os.path.dirname(self._bindir) == self._installdir
3741 3742 assert self._hgroot, 'must be called after _installhg()'
3742 3743 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3743 3744 b'make': b'make', # TODO: switch by option or environment?
3744 3745 b'prefix': self._installdir,
3745 3746 }
3746 3747 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3747 3748 vlog("# Running", cmd)
3748 3749 proc = subprocess.Popen(
3749 3750 cmd,
3750 3751 shell=True,
3751 3752 cwd=cwd,
3752 3753 stdin=subprocess.PIPE,
3753 3754 stdout=subprocess.PIPE,
3754 3755 stderr=subprocess.STDOUT,
3755 3756 )
3756 3757 out, _err = proc.communicate()
3757 3758 if proc.returncode != 0:
3758 3759 if PYTHON3:
3759 3760 sys.stdout.buffer.write(out)
3760 3761 else:
3761 3762 sys.stdout.write(out)
3762 3763 sys.exit(1)
3763 3764
3764 3765 def _installrhg(self):
3765 3766 """Install rhg into the test environment"""
3766 3767 vlog('# Performing temporary installation of rhg')
3767 3768 assert os.path.dirname(self._bindir) == self._installdir
3768 3769 assert self._hgroot, 'must be called after _installhg()'
3769 3770 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3770 3771 b'make': b'make', # TODO: switch by option or environment?
3771 3772 b'prefix': self._installdir,
3772 3773 }
3773 3774 cwd = self._hgroot
3774 3775 vlog("# Running", cmd)
3775 3776 proc = subprocess.Popen(
3776 3777 cmd,
3777 3778 shell=True,
3778 3779 cwd=cwd,
3779 3780 stdin=subprocess.PIPE,
3780 3781 stdout=subprocess.PIPE,
3781 3782 stderr=subprocess.STDOUT,
3782 3783 )
3783 3784 out, _err = proc.communicate()
3784 3785 if proc.returncode != 0:
3785 3786 if PYTHON3:
3786 3787 sys.stdout.buffer.write(out)
3787 3788 else:
3788 3789 sys.stdout.write(out)
3789 3790 sys.exit(1)
3790 3791
3791 3792 def _outputcoverage(self):
3792 3793 """Produce code coverage output."""
3793 3794 import coverage
3794 3795
3795 3796 coverage = coverage.coverage
3796 3797
3797 3798 vlog('# Producing coverage report')
3798 3799 # chdir is the easiest way to get short, relative paths in the
3799 3800 # output.
3800 3801 os.chdir(self._hgroot)
3801 3802 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3802 3803 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3803 3804
3804 3805 # Map install directory paths back to source directory.
3805 3806 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3806 3807
3807 3808 cov.combine()
3808 3809
3809 3810 omit = [
3810 3811 _bytes2sys(os.path.join(x, b'*'))
3811 3812 for x in [self._bindir, self._testdir]
3812 3813 ]
3813 3814 cov.report(ignore_errors=True, omit=omit)
3814 3815
3815 3816 if self.options.htmlcov:
3816 3817 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3817 3818 cov.html_report(directory=htmldir, omit=omit)
3818 3819 if self.options.annotate:
3819 3820 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3820 3821 if not os.path.isdir(adir):
3821 3822 os.mkdir(adir)
3822 3823 cov.annotate(directory=adir, omit=omit)
3823 3824
3824 3825 def _findprogram(self, program):
3825 3826 """Search PATH for a executable program"""
3826 3827 dpb = _sys2bytes(os.defpath)
3827 3828 sepb = _sys2bytes(os.pathsep)
3828 3829 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3829 3830 name = os.path.join(p, program)
3830 3831 if os.name == 'nt' or os.access(name, os.X_OK):
3831 3832 return _bytes2sys(name)
3832 3833 return None
3833 3834
3834 3835 def _checktools(self):
3835 3836 """Ensure tools required to run tests are present."""
3836 3837 for p in self.REQUIREDTOOLS:
3837 3838 if os.name == 'nt' and not p.endswith(b'.exe'):
3838 3839 p += b'.exe'
3839 3840 found = self._findprogram(p)
3840 3841 p = p.decode("utf-8")
3841 3842 if found:
3842 3843 vlog("# Found prerequisite", p, "at", found)
3843 3844 else:
3844 3845 print("WARNING: Did not find prerequisite tool: %s " % p)
3845 3846
3846 3847
3847 3848 def aggregateexceptions(path):
3848 3849 exceptioncounts = collections.Counter()
3849 3850 testsbyfailure = collections.defaultdict(set)
3850 3851 failuresbytest = collections.defaultdict(set)
3851 3852
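# Each report in this directory (written by the logexceptions extension)
# holds five NUL-separated fields: exception, main frame, hg frame,
# hg line and test name.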
3852 3853 for f in os.listdir(path):
3853 3854 with open(os.path.join(path, f), 'rb') as fh:
3854 3855 data = fh.read().split(b'\0')
3855 3856 if len(data) != 5:
3856 3857 continue
3857 3858
3858 3859 exc, mainframe, hgframe, hgline, testname = data
3859 3860 exc = exc.decode('utf-8')
3860 3861 mainframe = mainframe.decode('utf-8')
3861 3862 hgframe = hgframe.decode('utf-8')
3862 3863 hgline = hgline.decode('utf-8')
3863 3864 testname = testname.decode('utf-8')
3864 3865
3865 3866 key = (hgframe, hgline, exc)
3866 3867 exceptioncounts[key] += 1
3867 3868 testsbyfailure[key].add(testname)
3868 3869 failuresbytest[testname].add(key)
3869 3870
3870 3871 # Find the test with the fewest failures for each failure.
3871 3872 leastfailing = {}
3872 3873 for key, tests in testsbyfailure.items():
3873 3874 fewesttest = None
3874 3875 fewestcount = 99999999
3875 3876 for test in sorted(tests):
3876 3877 if len(failuresbytest[test]) < fewestcount:
3877 3878 fewesttest = test
3878 3879 fewestcount = len(failuresbytest[test])
3879 3880
3880 3881 leastfailing[key] = (fewestcount, fewesttest)
3881 3882
3882 3883 # Create a combined counter so we can sort by total occurrences and
3883 3884 # impacted tests.
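# combined[key] = (occurrences, impacted tests, failure count of the
# least-failing test, name of that test)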
3884 3885 combined = {}
3885 3886 for key in exceptioncounts:
3886 3887 combined[key] = (
3887 3888 exceptioncounts[key],
3888 3889 len(testsbyfailure[key]),
3889 3890 leastfailing[key][0],
3890 3891 leastfailing[key][1],
3891 3892 )
3892 3893
3893 3894 return {
3894 3895 'exceptioncounts': exceptioncounts,
3895 3896 'total': sum(exceptioncounts.values()),
3896 3897 'combined': combined,
3897 3898 'leastfailing': leastfailing,
3898 3899 'byfailure': testsbyfailure,
3899 3900 'bytest': failuresbytest,
3900 3901 }
3901 3902
3902 3903
3903 3904 if __name__ == '__main__':
3904 3905 runner = TestRunner()
3905 3906
3906 3907 try:
3907 3908 import msvcrt
3908 3909
3909 3910 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
3910 3911 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
3911 3912 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
3912 3913 except ImportError:
3913 3914 pass
3914 3915
3915 3916 sys.exit(runner.run(sys.argv[1:]))
@@ -1,2034 +1,2038 b''
1 1 This file tests the behavior of run-tests.py itself.
2 2
3 3 Avoid interference from actual test env:
4 4
5 5 $ . "$TESTDIR/helper-runtests.sh"
6 6
7 7 Smoke test with install
8 8 ============
9 9 $ "$PYTHON" $TESTDIR/run-tests.py $HGTEST_RUN_TESTS_PURE -l
10 10 running 0 tests using 0 parallel processes
11 11
12 12 # Ran 0 tests, 0 skipped, 0 failed.
13 13
14 14 Define a helper to avoid the install step
15 15 =============
16 16 $ rt()
17 17 > {
18 18 > "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@"
19 19 > }
20 20
21 21 error paths
22 22
23 23 #if symlink
24 24 $ ln -s `which true` hg
25 25 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
26 26 warning: --with-hg should specify an hg script
27 27 running 0 tests using 0 parallel processes
28 28
29 29 # Ran 0 tests, 0 skipped, 0 failed.
30 30 $ rm hg
31 31 #endif
32 32
33 33 #if execbit
34 34 $ touch hg
35 35 $ "$PYTHON" $TESTDIR/run-tests.py --with-hg=./hg
36 36 usage: run-tests.py [options] [tests]
37 37 run-tests.py: error: --with-hg must specify an executable hg script
38 38 [2]
39 39 $ rm hg
40 40 #endif
41 41
42 42 Features for testing optional lines
43 43 ===================================
44 44
45 45 $ cat > hghaveaddon.py <<EOF
46 46 > import hghave
47 47 > @hghave.check("custom", "custom hghave feature")
48 48 > def has_custom():
49 49 > return True
50 50 > @hghave.check("missing", "missing hghave feature")
51 51 > def has_missing():
52 52 > return False
53 53 > EOF
54 54
55 55 an empty test
56 56 =======================
57 57
58 58 $ touch test-empty.t
59 59 $ rt
60 60 running 1 tests using 1 parallel processes
61 61 .
62 62 # Ran 1 tests, 0 skipped, 0 failed.
63 63 $ rm test-empty.t
64 64
65 65 a successful test
66 66 =======================
67 67
68 68 $ cat > test-success.t << EOF
69 69 > $ echo babar
70 70 > babar
71 71 > $ echo xyzzy
72 72 > dont_print (?)
73 73 > nothing[42]line (re) (?)
74 74 > never*happens (glob) (?)
75 75 > more_nothing (?)
76 76 > xyzzy
77 77 > nor this (?)
78 78 > $ printf 'abc\ndef\nxyz\n'
79 79 > 123 (?)
80 80 > abc
81 81 > def (?)
82 82 > 456 (?)
83 83 > xyz
84 84 > $ printf 'zyx\nwvu\ntsr\n'
85 85 > abc (?)
86 86 > zyx (custom !)
87 87 > wvu
88 88 > no_print (no-custom !)
89 89 > tsr (no-missing !)
90 90 > missing (missing !)
91 91 > EOF
92 92
93 93 $ rt
94 94 running 1 tests using 1 parallel processes
95 95 .
96 96 # Ran 1 tests, 0 skipped, 0 failed.
97 97
98 98 failing test
99 99 ==================
100 100
101 101 test churn with globs
102 102 $ cat > test-failure.t <<EOF
103 103 > $ echo "bar-baz"; echo "bar-bad"; echo foo
104 104 > bar*bad (glob)
105 105 > bar*baz (glob)
106 106 > | fo (re)
107 107 > EOF
108 108 $ rt test-failure.t
109 109 running 1 tests using 1 parallel processes
110 110
111 111 --- $TESTTMP/test-failure.t
112 112 +++ $TESTTMP/test-failure.t.err
113 113 @@ -1,4 +1,4 @@
114 114 $ echo "bar-baz"; echo "bar-bad"; echo foo
115 115 + bar*baz (glob)
116 116 bar*bad (glob)
117 117 - bar*baz (glob)
118 118 - | fo (re)
119 119 + foo
120 120
121 121 ERROR: test-failure.t output changed
122 122 !
123 123 Failed test-failure.t: output changed
124 124 # Ran 1 tests, 0 skipped, 1 failed.
125 125 python hash seed: * (glob)
126 126 [1]
127 127
128 128 test how multiple globs get matched against lines in output
129 129 $ cat > test-failure-globs.t <<EOF
130 130 > $ echo "context"; echo "context"; \
131 131 > echo "key: 1"; echo "value: not a"; \
132 132 > echo "key: 2"; echo "value: not b"; \
133 133 > echo "key: 3"; echo "value: c"; \
134 134 > echo "key: 4"; echo "value: d"
135 135 > context
136 136 > context
137 137 > key: 1
138 138 > value: a
139 139 > key: 2
140 140 > value: b
141 141 > key: 3
142 142 > value: * (glob)
143 143 > key: 4
144 144 > value: * (glob)
145 145 > EOF
146 146 $ rt test-failure-globs.t
147 147 running 1 tests using 1 parallel processes
148 148
149 149 --- $TESTTMP/test-failure-globs.t
150 150 +++ $TESTTMP/test-failure-globs.t.err
151 151 @@ -2,9 +2,9 @@
152 152 context
153 153 context
154 154 key: 1
155 155 - value: a
156 156 + value: not a
157 157 key: 2
158 158 - value: b
159 159 + value: not b
160 160 key: 3
161 161 value: * (glob)
162 162 key: 4
163 163
164 164 ERROR: test-failure-globs.t output changed
165 165 !
166 166 Failed test-failure-globs.t: output changed
167 167 # Ran 1 tests, 0 skipped, 1 failed.
168 168 python hash seed: * (glob)
169 169 [1]
170 170 $ rm test-failure-globs.t
171 171
172 172 test diff colorisation
173 173
174 174 #if no-windows pygments
175 175 $ rt test-failure.t --color always
176 176 running 1 tests using 1 parallel processes
177 177
178 178 \x1b[38;5;124m--- $TESTTMP/test-failure.t\x1b[39m (esc)
179 179 \x1b[38;5;34m+++ $TESTTMP/test-failure.t.err\x1b[39m (esc)
180 180 \x1b[38;5;90;01m@@ -1,4 +1,4 @@\x1b[39;00m (esc)
181 181 $ echo "bar-baz"; echo "bar-bad"; echo foo
182 182 \x1b[38;5;34m+ bar*baz (glob)\x1b[39m (esc)
183 183 bar*bad (glob)
184 184 \x1b[38;5;124m- bar*baz (glob)\x1b[39m (esc)
185 185 \x1b[38;5;124m- | fo (re)\x1b[39m (esc)
186 186 \x1b[38;5;34m+ foo\x1b[39m (esc)
187 187
188 188 \x1b[38;5;88mERROR: \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m output changed\x1b[39m (esc)
189 189 !
190 190 \x1b[38;5;88mFailed \x1b[39m\x1b[38;5;9mtest-failure.t\x1b[39m\x1b[38;5;88m: output changed\x1b[39m (esc)
191 191 # Ran 1 tests, 0 skipped, 1 failed.
192 192 python hash seed: * (glob)
193 193 [1]
194 194
195 195 $ rt test-failure.t 2> tmp.log
196 196 running 1 tests using 1 parallel processes
197 197 [1]
198 198 $ cat tmp.log
199 199
200 200 --- $TESTTMP/test-failure.t
201 201 +++ $TESTTMP/test-failure.t.err
202 202 @@ -1,4 +1,4 @@
203 203 $ echo "bar-baz"; echo "bar-bad"; echo foo
204 204 + bar*baz (glob)
205 205 bar*bad (glob)
206 206 - bar*baz (glob)
207 207 - | fo (re)
208 208 + foo
209 209
210 210 ERROR: test-failure.t output changed
211 211 !
212 212 Failed test-failure.t: output changed
213 213 # Ran 1 tests, 0 skipped, 1 failed.
214 214 python hash seed: * (glob)
215 215 #endif
216 216
217 217 $ cat > test-failure.t << EOF
218 218 > $ true
219 219 > should go away (true !)
220 220 > $ true
221 221 > should stay (false !)
222 222 >
223 223 > Should remove first line, not second or third
224 224 > $ echo 'testing'
225 225 > baz*foo (glob) (true !)
226 226 > foobar*foo (glob) (false !)
227 227 > te*ting (glob) (true !)
228 228 >
229 229 > Should keep first two lines, remove third and last
230 230 > $ echo 'testing'
231 231 > test.ng (re) (true !)
232 232 > foo.ar (re) (false !)
233 233 > b.r (re) (true !)
234 234 > missing (?)
235 235 > awol (true !)
236 236 >
237 237 > The "missing" line should stay, even though awol is dropped
238 238 > $ echo 'testing'
239 239 > test.ng (re) (true !)
240 240 > foo.ar (?)
241 241 > awol
242 242 > missing (?)
243 243 > EOF
244 244 $ rt test-failure.t
245 245 running 1 tests using 1 parallel processes
246 246
247 247 --- $TESTTMP/test-failure.t
248 248 +++ $TESTTMP/test-failure.t.err
249 249 @@ -1,11 +1,9 @@
250 250 $ true
251 251 - should go away (true !)
252 252 $ true
253 253 should stay (false !)
254 254
255 255 Should remove first line, not second or third
256 256 $ echo 'testing'
257 257 - baz*foo (glob) (true !)
258 258 foobar*foo (glob) (false !)
259 259 te*ting (glob) (true !)
260 260
261 261 foo.ar (re) (false !)
262 262 missing (?)
263 263 @@ -13,13 +11,10 @@
264 264 $ echo 'testing'
265 265 test.ng (re) (true !)
266 266 foo.ar (re) (false !)
267 267 - b.r (re) (true !)
268 268 missing (?)
269 269 - awol (true !)
270 270
271 271 The "missing" line should stay, even though awol is dropped
272 272 $ echo 'testing'
273 273 test.ng (re) (true !)
274 274 foo.ar (?)
275 275 - awol
276 276 missing (?)
277 277
278 278 ERROR: test-failure.t output changed
279 279 !
280 280 Failed test-failure.t: output changed
281 281 # Ran 1 tests, 0 skipped, 1 failed.
282 282 python hash seed: * (glob)
283 283 [1]
284 284
285 285 basic failing test
286 286 $ cat > test-failure.t << EOF
287 287 > $ echo babar
288 288 > rataxes
289 289 > This is a noop statement so that
290 290 > this test is still more bytes than success.
291 291 > pad pad pad pad............................................................
292 292 > pad pad pad pad............................................................
293 293 > pad pad pad pad............................................................
294 294 > pad pad pad pad............................................................
295 295 > pad pad pad pad............................................................
296 296 > pad pad pad pad............................................................
297 297 > EOF
298 298
299 299 >>> fh = open('test-failure-unicode.t', 'wb')
300 300 >>> fh.write(u' $ echo babar\u03b1\n'.encode('utf-8')) and None
301 301 >>> fh.write(u' l\u03b5\u03b5t\n'.encode('utf-8')) and None
302 302
303 303 $ rt
304 304 running 3 tests using 1 parallel processes
305 305
306 306 --- $TESTTMP/test-failure.t
307 307 +++ $TESTTMP/test-failure.t.err
308 308 @@ -1,5 +1,5 @@
309 309 $ echo babar
310 310 - rataxes
311 311 + babar
312 312 This is a noop statement so that
313 313 this test is still more bytes than success.
314 314 pad pad pad pad............................................................
315 315
316 316 ERROR: test-failure.t output changed
317 317 !.
318 318 --- $TESTTMP/test-failure-unicode.t
319 319 +++ $TESTTMP/test-failure-unicode.t.err
320 320 @@ -1,2 +1,2 @@
321 321 $ echo babar\xce\xb1 (esc)
322 322 - l\xce\xb5\xce\xb5t (esc)
323 323 + babar\xce\xb1 (esc)
324 324
325 325 ERROR: test-failure-unicode.t output changed
326 326 !
327 327 Failed test-failure-unicode.t: output changed
328 328 Failed test-failure.t: output changed
329 329 # Ran 3 tests, 0 skipped, 2 failed.
330 330 python hash seed: * (glob)
331 331 [1]
332 332
333 333 test --outputdir
334 334 $ mkdir output
335 335 $ rt --outputdir output
336 336 running 3 tests using 1 parallel processes
337 337
338 338 --- $TESTTMP/test-failure.t
339 339 +++ $TESTTMP/output/test-failure.t.err
340 340 @@ -1,5 +1,5 @@
341 341 $ echo babar
342 342 - rataxes
343 343 + babar
344 344 This is a noop statement so that
345 345 this test is still more bytes than success.
346 346 pad pad pad pad............................................................
347 347
348 348 ERROR: test-failure.t output changed
349 349 !.
350 350 --- $TESTTMP/test-failure-unicode.t
351 351 +++ $TESTTMP/output/test-failure-unicode.t.err
352 352 @@ -1,2 +1,2 @@
353 353 $ echo babar\xce\xb1 (esc)
354 354 - l\xce\xb5\xce\xb5t (esc)
355 355 + babar\xce\xb1 (esc)
356 356
357 357 ERROR: test-failure-unicode.t output changed
358 358 !
359 359 Failed test-failure-unicode.t: output changed
360 360 Failed test-failure.t: output changed
361 361 # Ran 3 tests, 0 skipped, 2 failed.
362 362 python hash seed: * (glob)
363 363 [1]
364 364 $ ls -a output
365 365 .
366 366 ..
367 367 .testtimes
368 368 test-failure-unicode.t.err
369 369 test-failure.t.err
370 370
371 371 test --xunit support
372 372 $ rt --xunit=xunit.xml
373 373 running 3 tests using 1 parallel processes
374 374
375 375 --- $TESTTMP/test-failure.t
376 376 +++ $TESTTMP/test-failure.t.err
377 377 @@ -1,5 +1,5 @@
378 378 $ echo babar
379 379 - rataxes
380 380 + babar
381 381 This is a noop statement so that
382 382 this test is still more bytes than success.
383 383 pad pad pad pad............................................................
384 384
385 385 ERROR: test-failure.t output changed
386 386 !.
387 387 --- $TESTTMP/test-failure-unicode.t
388 388 +++ $TESTTMP/test-failure-unicode.t.err
389 389 @@ -1,2 +1,2 @@
390 390 $ echo babar\xce\xb1 (esc)
391 391 - l\xce\xb5\xce\xb5t (esc)
392 392 + babar\xce\xb1 (esc)
393 393
394 394 ERROR: test-failure-unicode.t output changed
395 395 !
396 396 Failed test-failure-unicode.t: output changed
397 397 Failed test-failure.t: output changed
398 398 # Ran 3 tests, 0 skipped, 2 failed.
399 399 python hash seed: * (glob)
400 400 [1]
401 401 $ cat xunit.xml
402 402 <?xml version="1.0" encoding="utf-8"?>
403 403 <testsuite errors="0" failures="2" name="run-tests" skipped="0" tests="3">
404 404 <testcase name="test-success.t" time="*"/> (glob)
405 405 <testcase name="test-failure-unicode.t" time="*"> (glob)
406 406 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure-unicode.t (py38 !)
407 407 <failure message="output changed" type="output-mismatch"> (no-py38 !)
408 408 <![CDATA[--- $TESTTMP/test-failure-unicode.t (no-py38 !)
409 409 +++ $TESTTMP/test-failure-unicode.t.err
410 410 @@ -1,2 +1,2 @@
411 411 $ echo babar\xce\xb1 (esc)
412 412 - l\xce\xb5\xce\xb5t (esc)
413 413 + babar\xce\xb1 (esc)
414 414 ]]></failure> (py38 !)
415 415 ]]> </failure> (no-py38 !)
416 416 </testcase>
417 417 <testcase name="test-failure.t" time="*"> (glob)
418 418 <failure message="output changed" type="output-mismatch"><![CDATA[--- $TESTTMP/test-failure.t (py38 !)
419 419 <failure message="output changed" type="output-mismatch"> (no-py38 !)
420 420 <![CDATA[--- $TESTTMP/test-failure.t (no-py38 !)
421 421 +++ $TESTTMP/test-failure.t.err
422 422 @@ -1,5 +1,5 @@
423 423 $ echo babar
424 424 - rataxes
425 425 + babar
426 426 This is a noop statement so that
427 427 this test is still more bytes than success.
428 428 pad pad pad pad............................................................
429 429 ]]></failure> (py38 !)
430 430 ]]> </failure> (no-py38 !)
431 431 </testcase>
432 432 </testsuite>
433 433
434 434 $ cat .testtimes
435 435 test-empty.t * (glob)
436 436 test-failure-globs.t * (glob)
437 437 test-failure-unicode.t * (glob)
438 438 test-failure.t * (glob)
439 439 test-success.t * (glob)
440 440
441 441 $ rt --list-tests
442 442 test-failure-unicode.t
443 443 test-failure.t
444 444 test-success.t
445 445
446 446 $ rt --list-tests --json
447 447 test-failure-unicode.t
448 448 test-failure.t
449 449 test-success.t
450 450 $ cat report.json
451 451 testreport ={
452 452 "test-failure-unicode.t": {
453 453 "result": "success"
454 454 },
455 455 "test-failure.t": {
456 456 "result": "success"
457 457 },
458 458 "test-success.t": {
459 459 "result": "success"
460 460 }
461 461 } (no-eol)
462 462
463 463 $ rt --list-tests --xunit=xunit.xml
464 464 test-failure-unicode.t
465 465 test-failure.t
466 466 test-success.t
467 467 $ cat xunit.xml
468 468 <?xml version="1.0" encoding="utf-8"?>
469 469 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
470 470 <testcase name="test-failure-unicode.t"/>
471 471 <testcase name="test-failure.t"/>
472 472 <testcase name="test-success.t"/>
473 473 </testsuite>
474 474
475 475 $ rt --list-tests test-failure* --json --xunit=xunit.xml --outputdir output
476 476 test-failure-unicode.t
477 477 test-failure.t
478 478 $ cat output/report.json
479 479 testreport ={
480 480 "test-failure-unicode.t": {
481 481 "result": "success"
482 482 },
483 483 "test-failure.t": {
484 484 "result": "success"
485 485 }
486 486 } (no-eol)
487 487 $ cat xunit.xml
488 488 <?xml version="1.0" encoding="utf-8"?>
489 489 <testsuite errors="0" failures="0" name="run-tests" skipped="0" tests="0">
490 490 <testcase name="test-failure-unicode.t"/>
491 491 <testcase name="test-failure.t"/>
492 492 </testsuite>
493 493
494 494 $ rm test-failure-unicode.t
495 495
496 496 test for --retest
497 497 ====================
498 498
499 499 $ rt --retest
500 500 running 1 tests using 1 parallel processes
501 501
502 502 --- $TESTTMP/test-failure.t
503 503 +++ $TESTTMP/test-failure.t.err
504 504 @@ -1,5 +1,5 @@
505 505 $ echo babar
506 506 - rataxes
507 507 + babar
508 508 This is a noop statement so that
509 509 this test is still more bytes than success.
510 510 pad pad pad pad............................................................
511 511
512 512 ERROR: test-failure.t output changed
513 513 !
514 514 Failed test-failure.t: output changed
515 515 # Ran 1 tests, 0 skipped, 1 failed.
516 516 python hash seed: * (glob)
517 517 [1]
518 518
519 519 --retest works with --outputdir
520 520 $ rm -r output
521 521 $ mkdir output
522 522 $ mv test-failure.t.err output
523 523 $ rt --retest --outputdir output
524 524 running 1 tests using 1 parallel processes
525 525
526 526 --- $TESTTMP/test-failure.t
527 527 +++ $TESTTMP/output/test-failure.t.err
528 528 @@ -1,5 +1,5 @@
529 529 $ echo babar
530 530 - rataxes
531 531 + babar
532 532 This is a noop statement so that
533 533 this test is still more bytes than success.
534 534 pad pad pad pad............................................................
535 535
536 536 ERROR: test-failure.t output changed
537 537 !
538 538 Failed test-failure.t: output changed
539 539 # Ran 1 tests, 0 skipped, 1 failed.
540 540 python hash seed: * (glob)
541 541 [1]
542 542
543 543 Selecting Tests To Run
544 544 ======================
545 545
546 546 successful
547 547
548 548 $ rt test-success.t
549 549 running 1 tests using 1 parallel processes
550 550 .
551 551 # Ran 1 tests, 0 skipped, 0 failed.
552 552
553 553 success w/ keyword
554 554 $ rt -k xyzzy
555 555 running 2 tests using 1 parallel processes
556 556 .
557 557 # Ran 2 tests, 1 skipped, 0 failed.
558 558
559 559 failed
560 560
561 561 $ rt test-failure.t
562 562 running 1 tests using 1 parallel processes
563 563
564 564 --- $TESTTMP/test-failure.t
565 565 +++ $TESTTMP/test-failure.t.err
566 566 @@ -1,5 +1,5 @@
567 567 $ echo babar
568 568 - rataxes
569 569 + babar
570 570 This is a noop statement so that
571 571 this test is still more bytes than success.
572 572 pad pad pad pad............................................................
573 573
574 574 ERROR: test-failure.t output changed
575 575 !
576 576 Failed test-failure.t: output changed
577 577 # Ran 1 tests, 0 skipped, 1 failed.
578 578 python hash seed: * (glob)
579 579 [1]
580 580
581 581 failure w/ keyword
582 582 $ rt -k rataxes
583 583 running 2 tests using 1 parallel processes
584 584
585 585 --- $TESTTMP/test-failure.t
586 586 +++ $TESTTMP/test-failure.t.err
587 587 @@ -1,5 +1,5 @@
588 588 $ echo babar
589 589 - rataxes
590 590 + babar
591 591 This is a noop statement so that
592 592 this test is still more bytes than success.
593 593 pad pad pad pad............................................................
594 594
595 595 ERROR: test-failure.t output changed
596 596 !
597 597 Failed test-failure.t: output changed
598 598 # Ran 2 tests, 1 skipped, 1 failed.
599 599 python hash seed: * (glob)
600 600 [1]
601 601
602 602 Verify that when a process fails to start we show a useful message
603 603 ==================================================================
604 604
605 605 $ cat > test-serve-fail.t <<EOF
606 606 > $ echo 'abort: child process failed to start blah'
607 607 > EOF
608 608 $ rt test-serve-fail.t
609 609 running 1 tests using 1 parallel processes
610 610
611 611 --- $TESTTMP/test-serve-fail.t
612 612 +++ $TESTTMP/test-serve-fail.t.err
613 613 @@ -1* +1,2 @@ (glob)
614 614 $ echo 'abort: child process failed to start blah'
615 615 + abort: child process failed to start blah
616 616
617 617 ERROR: test-serve-fail.t output changed
618 618 !
619 619 Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob)
620 620 # Ran 1 tests, 0 skipped, 1 failed.
621 621 python hash seed: * (glob)
622 622 [1]
623 623 $ rm test-serve-fail.t
624 624
625 625 Verify that we can try other ports
626 626 ===================================
627 627
628 628 Extensions aren't inherited by the invoked run-tests.py. An extension
629 629 introducing a repository requirement could cause this to fail. So we force
630 630 HGRCPATH to get a clean environment.
631 631
632 632 $ HGRCPATH= hg init inuse
633 633 $ hg serve -R inuse -p $HGPORT -d --pid-file=blocks.pid
634 634 $ cat blocks.pid >> $DAEMON_PIDS
635 635 $ cat > test-serve-inuse.t <<EOF
636 636 > $ hg serve -R `pwd`/inuse -p \$HGPORT -d --pid-file=hg.pid
637 637 > $ cat hg.pid >> \$DAEMON_PIDS
638 638 > EOF
639 639 $ rt test-serve-inuse.t
640 640 running 1 tests using 1 parallel processes
641 641 .
642 642 # Ran 1 tests, 0 skipped, 0 failed.
643 643 $ rm test-serve-inuse.t
644 644 $ killdaemons.py $DAEMON_PIDS
645 645
646 646 Running In Debug Mode
647 647 ======================
648 648
649 649 $ rt --debug 2>&1 | grep -v pwd
650 650 running 2 tests using 1 parallel processes
651 651 + alias hg=hg.exe (windows !)
652 652 + echo *SALT* 0 0 (glob)
653 653 *SALT* 0 0 (glob)
654 654 + echo babar
655 655 babar
656 656 + echo *SALT* 10 0 (glob)
657 657 *SALT* 10 0 (glob)
658 658 .+ alias hg=hg.exe (windows !)
659 659 *+ echo *SALT* 0 0 (glob)
660 660 *SALT* 0 0 (glob)
661 661 + echo babar
662 662 babar
663 663 + echo *SALT* 2 0 (glob)
664 664 *SALT* 2 0 (glob)
665 665 + echo xyzzy
666 666 xyzzy
667 667 + echo *SALT* 9 0 (glob)
668 668 *SALT* 9 0 (glob)
669 669 + printf *abc\ndef\nxyz\n* (glob)
670 670 abc
671 671 def
672 672 xyz
673 673 + echo *SALT* 15 0 (glob)
674 674 *SALT* 15 0 (glob)
675 675 + printf *zyx\nwvu\ntsr\n* (glob)
676 676 zyx
677 677 wvu
678 678 tsr
679 679 + echo *SALT* 22 0 (glob)
680 680 *SALT* 22 0 (glob)
681 681 .
682 682 # Ran 2 tests, 0 skipped, 0 failed.
683 683
684 684 Parallel runs
685 685 ==============
686 686
687 687 (duplicate the failing test to get predictable output)
688 688 $ cp test-failure.t test-failure-copy.t
689 689
690 690 $ rt --jobs 2 test-failure*.t -n
691 691 running 2 tests using 2 parallel processes
692 692 !!
693 693 Failed test-failure*.t: output changed (glob)
694 694 Failed test-failure*.t: output changed (glob)
695 695 # Ran 2 tests, 0 skipped, 2 failed.
696 696 python hash seed: * (glob)
697 697 [1]
698 698
699 699 failures in parallel with --first should only print one failure
700 700 $ rt --jobs 2 --first test-failure*.t
701 701 running 2 tests using 2 parallel processes
702 702
703 703 --- $TESTTMP/test-failure*.t (glob)
704 704 +++ $TESTTMP/test-failure*.t.err (glob)
705 705 @@ -1,5 +1,5 @@
706 706 $ echo babar
707 707 - rataxes
708 708 + babar
709 709 This is a noop statement so that
710 710 this test is still more bytes than success.
711 711 pad pad pad pad............................................................
712 712
713 713 Failed test-failure*.t: output changed (glob)
714 714 Failed test-failure*.t: output changed (glob)
715 715 # Ran 2 tests, 0 skipped, 2 failed.
716 716 python hash seed: * (glob)
717 717 [1]
718 718
719 719
720 720 (delete the duplicated test file)
721 721 $ rm test-failure-copy.t
722 722
723 723 multiple runs per test should be parallelized
724 724
725 725 $ rt --jobs 2 --runs-per-test 2 test-success.t
726 726 running 2 tests using 2 parallel processes
727 727 ..
728 728 # Ran 2 tests, 0 skipped, 0 failed.
729 729
730 730 Interactive run
731 731 ===============
732 732
733 733 (backup the failing test)
734 734 $ cp test-failure.t backup
735 735
736 736 Refuse the fix
737 737
738 738 $ echo 'n' | rt -i
739 739 running 2 tests using 1 parallel processes
740 740
741 741 --- $TESTTMP/test-failure.t
742 742 +++ $TESTTMP/test-failure.t.err
743 743 @@ -1,5 +1,5 @@
744 744 $ echo babar
745 745 - rataxes
746 746 + babar
747 747 This is a noop statement so that
748 748 this test is still more bytes than success.
749 749 pad pad pad pad............................................................
750 750 Accept this change? [y/N]
751 751 ERROR: test-failure.t output changed
752 752 !.
753 753 Failed test-failure.t: output changed
754 754 # Ran 2 tests, 0 skipped, 1 failed.
755 755 python hash seed: * (glob)
756 756 [1]
757 757
758 758 $ cat test-failure.t
759 759 $ echo babar
760 760 rataxes
761 761 This is a noop statement so that
762 762 this test is still more bytes than success.
763 763 pad pad pad pad............................................................
764 764 pad pad pad pad............................................................
765 765 pad pad pad pad............................................................
766 766 pad pad pad pad............................................................
767 767 pad pad pad pad............................................................
768 768 pad pad pad pad............................................................
769 769
770 770 Interactive with custom view
771 771
772 772 $ echo 'n' | rt -i --view echo
773 773 running 2 tests using 1 parallel processes
774 774 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
775 775 Accept this change? [y/N]* (glob)
776 776 ERROR: test-failure.t output changed
777 777 !.
778 778 Failed test-failure.t: output changed
779 779 # Ran 2 tests, 0 skipped, 1 failed.
780 780 python hash seed: * (glob)
781 781 [1]
782 782
783 783 View the fix
784 784
785 785 $ echo 'y' | rt --view echo
786 786 running 2 tests using 1 parallel processes
787 787 $TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
788 788
789 789 ERROR: test-failure.t output changed
790 790 !.
791 791 Failed test-failure.t: output changed
792 792 # Ran 2 tests, 0 skipped, 1 failed.
793 793 python hash seed: * (glob)
794 794 [1]
795 795
796 796 Accept the fix
797 797
798 798 $ cat >> test-failure.t <<EOF
799 799 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
800 800 > saved backup bundle to \$TESTTMP/foo.hg
801 801 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
802 802 > saved backup bundle to $TESTTMP\\foo.hg
803 803 > $ echo 'saved backup bundle to \$TESTTMP/foo.hg'
804 804 > saved backup bundle to \$TESTTMP/*.hg (glob)
805 805 > EOF
806 806 $ echo 'y' | rt -i 2>&1
807 807 running 2 tests using 1 parallel processes
808 808
809 809 --- $TESTTMP/test-failure.t
810 810 +++ $TESTTMP/test-failure.t.err
811 811 @@ -1,5 +1,5 @@
812 812 $ echo babar
813 813 - rataxes
814 814 + babar
815 815 This is a noop statement so that
816 816 this test is still more bytes than success.
817 817 pad pad pad pad............................................................
818 818 @@ -11,6 +11,6 @@
819 819 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
820 820 saved backup bundle to $TESTTMP/foo.hg
821 821 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
822 822 - saved backup bundle to $TESTTMP\foo.hg
823 823 + saved backup bundle to $TESTTMP/foo.hg
824 824 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
825 825 saved backup bundle to $TESTTMP/*.hg (glob)
826 826 Accept this change? [y/N] ..
827 827 # Ran 2 tests, 0 skipped, 0 failed.
828 828
829 829 $ sed -e 's,(glob)$,&<,g' test-failure.t
830 830 $ echo babar
831 831 babar
832 832 This is a noop statement so that
833 833 this test is still more bytes than success.
834 834 pad pad pad pad............................................................
835 835 pad pad pad pad............................................................
836 836 pad pad pad pad............................................................
837 837 pad pad pad pad............................................................
838 838 pad pad pad pad............................................................
839 839 pad pad pad pad............................................................
840 840 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
841 841 saved backup bundle to $TESTTMP/foo.hg
842 842 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
843 843 saved backup bundle to $TESTTMP/foo.hg
844 844 $ echo 'saved backup bundle to $TESTTMP/foo.hg'
845 845 saved backup bundle to $TESTTMP/*.hg (glob)<
846 846
847 847 $ rm test-failure.t
848 848
849 849 Race condition - test file was modified while the test is running
850 850
851 851 $ TESTRACEDIR=`pwd`
852 852 $ export TESTRACEDIR
853 853 $ cat > test-race.t <<EOF
854 854 > $ echo 1
855 855 > $ echo "# a new line" >> $TESTRACEDIR/test-race.t
856 856 > EOF
857 857
858 858 $ rt -i test-race.t
859 859 running 1 tests using 1 parallel processes
860 860
861 861 --- $TESTTMP/test-race.t
862 862 +++ $TESTTMP/test-race.t.err
863 863 @@ -1,2 +1,3 @@
864 864 $ echo 1
865 865 + 1
866 866 $ echo "# a new line" >> $TESTTMP/test-race.t
867 867 Reference output has changed (run again to prompt changes)
868 868 ERROR: test-race.t output changed
869 869 !
870 870 Failed test-race.t: output changed
871 871 # Ran 1 tests, 0 skipped, 1 failed.
872 872 python hash seed: * (glob)
873 873 [1]
874 874
875 875 $ rm test-race.t
876 876
877 877 When "#testcases" is used in .t files
878 878
879 879 $ cat >> test-cases.t <<EOF
880 880 > #testcases a b
881 881 > #if a
882 882 > $ echo 1
883 883 > #endif
884 884 > #if b
885 885 > $ echo 2
886 886 > #endif
887 887 > EOF
888 888
889 889 $ cat <<EOF | rt -i test-cases.t 2>&1
890 890 > y
891 891 > y
892 892 > EOF
893 893 running 2 tests using 1 parallel processes
894 894
895 895 --- $TESTTMP/test-cases.t
896 896 +++ $TESTTMP/test-cases.t#a.err
897 897 @@ -1,6 +1,7 @@
898 898 #testcases a b
899 899 #if a
900 900 $ echo 1
901 901 + 1
902 902 #endif
903 903 #if b
904 904 $ echo 2
905 905 Accept this change? [y/N] .
906 906 --- $TESTTMP/test-cases.t
907 907 +++ $TESTTMP/test-cases.t#b.err
908 908 @@ -5,4 +5,5 @@
909 909 #endif
910 910 #if b
911 911 $ echo 2
912 912 + 2
913 913 #endif
914 914 Accept this change? [y/N] .
915 915 # Ran 2 tests, 0 skipped, 0 failed.
916 916
917 917 $ cat test-cases.t
918 918 #testcases a b
919 919 #if a
920 920 $ echo 1
921 921 1
922 922 #endif
923 923 #if b
924 924 $ echo 2
925 925 2
926 926 #endif
927 927
928 928 $ cat >> test-cases.t <<'EOF'
929 929 > #if a
930 930 > $ NAME=A
931 931 > #else
932 932 > $ NAME=B
933 933 > #endif
934 934 > $ echo $NAME
935 935 > A (a !)
936 936 > B (b !)
937 937 > EOF
938 938 $ rt test-cases.t
939 939 running 2 tests using 1 parallel processes
940 940 ..
941 941 # Ran 2 tests, 0 skipped, 0 failed.
942 942
943 943 When using multiple dimensions of "#testcases" in .t files
944 944
945 945 $ cat > test-cases.t <<'EOF'
946 946 > #testcases a b
947 947 > #testcases c d
948 948 > #if a d
949 949 > $ echo $TESTCASE
950 950 > a#d
951 951 > #endif
952 952 > #if b c
953 953 > $ echo yes
954 954 > no
955 955 > #endif
956 956 > EOF
957 957 $ rt test-cases.t
958 958 running 4 tests using 1 parallel processes
959 959 ..
960 960 --- $TESTTMP/test-cases.t
961 961 +++ $TESTTMP/test-cases.t#b#c.err
962 962 @@ -6,5 +6,5 @@
963 963 #endif
964 964 #if b c
965 965 $ echo yes
966 966 - no
967 967 + yes
968 968 #endif
969 969
970 970 ERROR: test-cases.t#b#c output changed
971 971 !.
972 972 Failed test-cases.t#b#c: output changed
973 973 # Ran 4 tests, 0 skipped, 1 failed.
974 974 python hash seed: * (glob)
975 975 [1]
976 976
977 977 $ rt --retest
978 978 running 1 tests using 1 parallel processes
979 979
980 980 --- $TESTTMP/test-cases.t
981 981 +++ $TESTTMP/test-cases.t#b#c.err
982 982 @@ -6,5 +6,5 @@
983 983 #endif
984 984 #if b c
985 985 $ echo yes
986 986 - no
987 987 + yes
988 988 #endif
989 989
990 990 ERROR: test-cases.t#b#c output changed
991 991 !
992 992 Failed test-cases.t#b#c: output changed
993 993 # Ran 1 tests, 0 skipped, 1 failed.
994 994 python hash seed: * (glob)
995 995 [1]
996 996 $ rm test-cases.t#b#c.err
997 997 $ rm test-cases.t
998 998
999 999 (reinstall)
1000 1000 $ mv backup test-failure.t
1001 1001
1002 1002 No Diff
1003 1003 ===============
1004 1004
1005 1005 $ rt --nodiff
1006 1006 running 2 tests using 1 parallel processes
1007 1007 !.
1008 1008 Failed test-failure.t: output changed
1009 1009 # Ran 2 tests, 0 skipped, 1 failed.
1010 1010 python hash seed: * (glob)
1011 1011 [1]
1012 1012
1013 1013 test --tmpdir support
1014 1014 $ rt --tmpdir=$TESTTMP/keep test-success.t
1015 1015 running 1 tests using 1 parallel processes
1016 1016
1017 1017 Keeping testtmp dir: $TESTTMP/keep/child1/test-success.t
1018 1018 Keeping threadtmp dir: $TESTTMP/keep/child1
1019 1019 .
1020 1020 # Ran 1 tests, 0 skipped, 0 failed.
1021 1021
1022 1022 timeouts
1023 1023 ========
1024 1024 $ cat > test-timeout.t <<EOF
1025 1025 > $ sleep 2
1026 1026 > $ echo pass
1027 1027 > pass
1028 1028 > EOF
1029 1029 > echo '#require slow' > test-slow-timeout.t
1030 1030 > cat test-timeout.t >> test-slow-timeout.t
1031 1031 $ rt --timeout=1 --slowtimeout=3 test-timeout.t test-slow-timeout.t
1032 1032 running 2 tests using 1 parallel processes
1033 1033 st
1034 1034 Skipped test-slow-timeout.t: missing feature: allow slow tests (use --allow-slow-tests)
1035 1035 Failed test-timeout.t: timed out
1036 1036 # Ran 1 tests, 1 skipped, 1 failed.
1037 1037 python hash seed: * (glob)
1038 1038 [1]
1039 1039 $ rt --timeout=1 --slowtimeout=3 \
1040 1040 > test-timeout.t test-slow-timeout.t --allow-slow-tests
1041 1041 running 2 tests using 1 parallel processes
1042 1042 .t
1043 1043 Failed test-timeout.t: timed out
1044 1044 # Ran 2 tests, 0 skipped, 1 failed.
1045 1045 python hash seed: * (glob)
1046 1046 [1]
1047 1047 $ rm test-timeout.t test-slow-timeout.t
1048 1048
1049 1049 test for --time
1050 1050 ==================
1051 1051
1052 1052 $ rt test-success.t --time
1053 1053 running 1 tests using 1 parallel processes
1054 1054 .
1055 1055 # Ran 1 tests, 0 skipped, 0 failed.
1056 1056 # Producing time report
1057 1057 start end cuser csys real Test
1058 1058 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1059 1059
1060 1060 test for --time with --jobs enabled
1061 1061 ====================================
1062 1062
1063 1063 $ rt test-success.t --time --jobs 2
1064 1064 running 1 tests using 1 parallel processes
1065 1065 .
1066 1066 # Ran 1 tests, 0 skipped, 0 failed.
1067 1067 # Producing time report
1068 1068 start end cuser csys real Test
1069 1069 \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} test-success.t (re)
1070 1070
1071 1071 Skips
1072 1072 ================
1073 1073 $ cat > test-skip.t <<EOF
1074 1074 > $ echo xyzzy
1075 1075 > #if true
1076 1076 > #require false
1077 1077 > #end
1078 1078 > EOF
1079 1079 $ cat > test-noskip.t <<EOF
1080 1080 > #if false
1081 1081 > #require false
1082 1082 > #endif
1083 1083 > EOF
1084 1084 $ rt --nodiff
1085 1085 running 4 tests using 1 parallel processes
1086 1086 !.s.
1087 1087 Skipped test-skip.t: missing feature: nail clipper
1088 1088 Failed test-failure.t: output changed
1089 1089 # Ran 3 tests, 1 skipped, 1 failed.
1090 1090 python hash seed: * (glob)
1091 1091 [1]
1092 1092
1093 1093 $ rm test-noskip.t
1094 1094 $ rt --keyword xyzzy
1095 1095 running 3 tests using 1 parallel processes
1096 1096 .s
1097 1097 Skipped test-skip.t: missing feature: nail clipper
1098 1098 # Ran 2 tests, 2 skipped, 0 failed.
1099 1099
1100 1100 Skips with xml
1101 1101 $ rt --keyword xyzzy \
1102 1102 > --xunit=xunit.xml
1103 1103 running 3 tests using 1 parallel processes
1104 1104 .s
1105 1105 Skipped test-skip.t: missing feature: nail clipper
1106 1106 # Ran 2 tests, 2 skipped, 0 failed.
1107 1107 $ cat xunit.xml
1108 1108 <?xml version="1.0" encoding="utf-8"?>
1109 1109 <testsuite errors="0" failures="0" name="run-tests" skipped="2" tests="2">
1110 1110 <testcase name="test-success.t" time="*"/> (glob)
1111 1111 <testcase name="test-skip.t">
1112 1112 <skipped><![CDATA[missing feature: nail clipper]]></skipped> (py38 !)
1113 1113 <skipped> (no-py38 !)
1114 1114 <![CDATA[missing feature: nail clipper]]> </skipped> (no-py38 !)
1115 1115 </testcase>
1116 1116 </testsuite>
1117 1117
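For consumers of the --xunit report shown above, the file is ordinary JUnit-style XML and can be read with the Python standard library. A minimal sketch (the helper name and the idea of summarising by attribute are illustrative, not part of run-tests.py):

    import xml.etree.ElementTree as ET

    def summarize_xunit(path="xunit.xml"):
        # Read the <testsuite> element written by --xunit and report its
        # counters plus the names of the test cases that carry a <skipped> child.
        suite = ET.parse(path).getroot()
        counters = {k: int(suite.get(k, 0)) for k in ("tests", "skipped", "failures", "errors")}
        skipped = [case.get("name") for case in suite if case.find("skipped") is not None]
        return counters, skipped

    # For the report above: ({'tests': 2, 'skipped': 2, 'failures': 0, 'errors': 0}, ['test-skip.t'])
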
1118 1118 Missing skips or blacklisted skips don't count as executed:
1119 $ echo test-failure.t > blacklist
1119 $ mkdir tests
1120 $ echo tests/test-failure.t > blacklist
1121 $ cp test-failure.t tests
1120 1122 $ rt --blacklist=blacklist --json\
1121 > test-failure.t test-bogus.t
1123 > tests/test-failure.t tests/test-bogus.t
1122 1124 running 2 tests using 1 parallel processes
1123 1125 ss
1124 1126 Skipped test-bogus.t: Doesn't exist
1125 1127 Skipped test-failure.t: blacklisted
1126 1128 # Ran 0 tests, 2 skipped, 0 failed.
1127 $ cat report.json
1129 $ cat tests/report.json
1128 1130 testreport ={
1129 1131 "test-bogus.t": {
1130 1132 "result": "skip"
1131 1133 },
1132 1134 "test-failure.t": {
1133 1135 "result": "skip"
1134 1136 }
1135 1137 } (no-eol)
1138 $ rm -r tests
1139 $ echo test-failure.t > blacklist
1136 1140
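The block above deliberately lists the blacklisted test with a directory component (tests/test-failure.t) and passes the same relative paths on the command line. For that lookup to also work on Windows, where os.sep is a backslash, the comparison has to be separator-insensitive. A minimal sketch of that kind of normalisation (the helper names are illustrative, not run-tests.py internals):

    import os

    def _norm(path):
        # Collapse '.'/'..' segments and compare with forward slashes so that
        # 'tests\\test-failure.t' and 'tests/test-failure.t' are treated as equal.
        return os.path.normpath(path).replace(os.sep, "/")

    def is_blacklisted(test, entries):
        normalized = {_norm(e) for e in entries}
        return _norm(test) in normalized

    # On Windows: is_blacklisted(r"tests\test-failure.t", ["tests/test-failure.t"]) -> True
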
1137 1141 Whitelist trumps blacklist
1138 1142 $ echo test-failure.t > whitelist
1139 1143 $ rt --blacklist=blacklist --whitelist=whitelist --json\
1140 1144 > test-failure.t test-bogus.t
1141 1145 running 2 tests using 1 parallel processes
1142 1146 s
1143 1147 --- $TESTTMP/test-failure.t
1144 1148 +++ $TESTTMP/test-failure.t.err
1145 1149 @@ -1,5 +1,5 @@
1146 1150 $ echo babar
1147 1151 - rataxes
1148 1152 + babar
1149 1153 This is a noop statement so that
1150 1154 this test is still more bytes than success.
1151 1155 pad pad pad pad............................................................
1152 1156
1153 1157 ERROR: test-failure.t output changed
1154 1158 !
1155 1159 Skipped test-bogus.t: Doesn't exist
1156 1160 Failed test-failure.t: output changed
1157 1161 # Ran 1 tests, 1 skipped, 1 failed.
1158 1162 python hash seed: * (glob)
1159 1163 [1]
1160 1164
1161 1165 Ensure that --test-list causes only the tests listed in that file to
1162 1166 be executed.
1163 1167 $ echo test-success.t >> onlytest
1164 1168 $ rt --test-list=onlytest
1165 1169 running 1 tests using 1 parallel processes
1166 1170 .
1167 1171 # Ran 1 tests, 0 skipped, 0 failed.
1168 1172 $ echo test-bogus.t >> anothertest
1169 1173 $ rt --test-list=onlytest --test-list=anothertest
1170 1174 running 2 tests using 1 parallel processes
1171 1175 s.
1172 1176 Skipped test-bogus.t: Doesn't exist
1173 1177 # Ran 1 tests, 1 skipped, 0 failed.
1174 1178 $ rm onlytest anothertest
1175 1179
1176 1180 test for --json
1177 1181 ==================
1178 1182
1179 1183 $ rt --json
1180 1184 running 3 tests using 1 parallel processes
1181 1185
1182 1186 --- $TESTTMP/test-failure.t
1183 1187 +++ $TESTTMP/test-failure.t.err
1184 1188 @@ -1,5 +1,5 @@
1185 1189 $ echo babar
1186 1190 - rataxes
1187 1191 + babar
1188 1192 This is a noop statement so that
1189 1193 this test is still more bytes than success.
1190 1194 pad pad pad pad............................................................
1191 1195
1192 1196 ERROR: test-failure.t output changed
1193 1197 !.s
1194 1198 Skipped test-skip.t: missing feature: nail clipper
1195 1199 Failed test-failure.t: output changed
1196 1200 # Ran 2 tests, 1 skipped, 1 failed.
1197 1201 python hash seed: * (glob)
1198 1202 [1]
1199 1203
1200 1204 $ cat report.json
1201 1205 testreport ={
1202 1206 "test-failure.t": [\{] (re)
1203 1207 "csys": "\s*\d+\.\d{3,4}", ? (re)
1204 1208 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1205 1209 "diff": "---.+\+\+\+.+", ? (re)
1206 1210 "end": "\s*\d+\.\d{3,4}", ? (re)
1207 1211 "result": "failure", ? (re)
1208 1212 "start": "\s*\d+\.\d{3,4}", ? (re)
1209 1213 "time": "\s*\d+\.\d{3,4}" (re)
1210 1214 }, ? (re)
1211 1215 "test-skip.t": {
1212 1216 "csys": "\s*\d+\.\d{3,4}", ? (re)
1213 1217 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1214 1218 "diff": "", ? (re)
1215 1219 "end": "\s*\d+\.\d{3,4}", ? (re)
1216 1220 "result": "skip", ? (re)
1217 1221 "start": "\s*\d+\.\d{3,4}", ? (re)
1218 1222 "time": "\s*\d+\.\d{3,4}" (re)
1219 1223 }, ? (re)
1220 1224 "test-success.t": [\{] (re)
1221 1225 "csys": "\s*\d+\.\d{3,4}", ? (re)
1222 1226 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1223 1227 "diff": "", ? (re)
1224 1228 "end": "\s*\d+\.\d{3,4}", ? (re)
1225 1229 "result": "success", ? (re)
1226 1230 "start": "\s*\d+\.\d{3,4}", ? (re)
1227 1231 "time": "\s*\d+\.\d{3,4}" (re)
1228 1232 }
1229 1233 } (no-eol)
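
Note that the --json report is not bare JSON: it starts with a literal "testreport =" prefix and is written without a trailing newline, so a consumer has to strip that prefix before decoding. A minimal sketch, assuming the report.json path shown above (the helper name is illustrative):

    import json

    def load_report(path="report.json"):
        with open(path) as fh:
            text = fh.read()
        # Drop the leading "testreport =" and decode the remaining JSON object.
        _, _, payload = text.partition("=")
        return json.loads(payload)

    report = load_report()
    failed = [name for name, data in report.items() if data["result"] == "failure"]
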
1230 1234 --json with --outputdir
1231 1235
1232 1236 $ rm report.json
1233 1237 $ rm -r output
1234 1238 $ mkdir output
1235 1239 $ rt --json --outputdir output
1236 1240 running 3 tests using 1 parallel processes
1237 1241
1238 1242 --- $TESTTMP/test-failure.t
1239 1243 +++ $TESTTMP/output/test-failure.t.err
1240 1244 @@ -1,5 +1,5 @@
1241 1245 $ echo babar
1242 1246 - rataxes
1243 1247 + babar
1244 1248 This is a noop statement so that
1245 1249 this test is still more bytes than success.
1246 1250 pad pad pad pad............................................................
1247 1251
1248 1252 ERROR: test-failure.t output changed
1249 1253 !.s
1250 1254 Skipped test-skip.t: missing feature: nail clipper
1251 1255 Failed test-failure.t: output changed
1252 1256 # Ran 2 tests, 1 skipped, 1 failed.
1253 1257 python hash seed: * (glob)
1254 1258 [1]
1255 1259 $ f report.json
1256 1260 report.json: file not found
1257 1261 $ cat output/report.json
1258 1262 testreport ={
1259 1263 "test-failure.t": [\{] (re)
1260 1264 "csys": "\s*\d+\.\d{3,4}", ? (re)
1261 1265 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1262 1266 "diff": "---.+\+\+\+.+", ? (re)
1263 1267 "end": "\s*\d+\.\d{3,4}", ? (re)
1264 1268 "result": "failure", ? (re)
1265 1269 "start": "\s*\d+\.\d{3,4}", ? (re)
1266 1270 "time": "\s*\d+\.\d{3,4}" (re)
1267 1271 }, ? (re)
1268 1272 "test-skip.t": {
1269 1273 "csys": "\s*\d+\.\d{3,4}", ? (re)
1270 1274 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1271 1275 "diff": "", ? (re)
1272 1276 "end": "\s*\d+\.\d{3,4}", ? (re)
1273 1277 "result": "skip", ? (re)
1274 1278 "start": "\s*\d+\.\d{3,4}", ? (re)
1275 1279 "time": "\s*\d+\.\d{3,4}" (re)
1276 1280 }, ? (re)
1277 1281 "test-success.t": [\{] (re)
1278 1282 "csys": "\s*\d+\.\d{3,4}", ? (re)
1279 1283 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1280 1284 "diff": "", ? (re)
1281 1285 "end": "\s*\d+\.\d{3,4}", ? (re)
1282 1286 "result": "success", ? (re)
1283 1287 "start": "\s*\d+\.\d{3,4}", ? (re)
1284 1288 "time": "\s*\d+\.\d{3,4}" (re)
1285 1289 }
1286 1290 } (no-eol)
1287 1291 $ ls -a output
1288 1292 .
1289 1293 ..
1290 1294 .testtimes
1291 1295 report.json
1292 1296 test-failure.t.err
1293 1297
1294 1298 Test that a failed test accepted through interactive mode is properly reported:
1295 1299
1296 1300 $ cp test-failure.t backup
1297 1301 $ echo y | rt --json -i
1298 1302 running 3 tests using 1 parallel processes
1299 1303
1300 1304 --- $TESTTMP/test-failure.t
1301 1305 +++ $TESTTMP/test-failure.t.err
1302 1306 @@ -1,5 +1,5 @@
1303 1307 $ echo babar
1304 1308 - rataxes
1305 1309 + babar
1306 1310 This is a noop statement so that
1307 1311 this test is still more bytes than success.
1308 1312 pad pad pad pad............................................................
1309 1313 Accept this change? [y/N] ..s
1310 1314 Skipped test-skip.t: missing feature: nail clipper
1311 1315 # Ran 2 tests, 1 skipped, 0 failed.
1312 1316
1313 1317 $ cat report.json
1314 1318 testreport ={
1315 1319 "test-failure.t": [\{] (re)
1316 1320 "csys": "\s*\d+\.\d{3,4}", ? (re)
1317 1321 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1318 1322 "diff": "", ? (re)
1319 1323 "end": "\s*\d+\.\d{3,4}", ? (re)
1320 1324 "result": "success", ? (re)
1321 1325 "start": "\s*\d+\.\d{3,4}", ? (re)
1322 1326 "time": "\s*\d+\.\d{3,4}" (re)
1323 1327 }, ? (re)
1324 1328 "test-skip.t": {
1325 1329 "csys": "\s*\d+\.\d{3,4}", ? (re)
1326 1330 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1327 1331 "diff": "", ? (re)
1328 1332 "end": "\s*\d+\.\d{3,4}", ? (re)
1329 1333 "result": "skip", ? (re)
1330 1334 "start": "\s*\d+\.\d{3,4}", ? (re)
1331 1335 "time": "\s*\d+\.\d{3,4}" (re)
1332 1336 }, ? (re)
1333 1337 "test-success.t": [\{] (re)
1334 1338 "csys": "\s*\d+\.\d{3,4}", ? (re)
1335 1339 "cuser": "\s*\d+\.\d{3,4}", ? (re)
1336 1340 "diff": "", ? (re)
1337 1341 "end": "\s*\d+\.\d{3,4}", ? (re)
1338 1342 "result": "success", ? (re)
1339 1343 "start": "\s*\d+\.\d{3,4}", ? (re)
1340 1344 "time": "\s*\d+\.\d{3,4}" (re)
1341 1345 }
1342 1346 } (no-eol)
1343 1347 $ mv backup test-failure.t
1344 1348
1345 1349 backslash on end of line with glob matching is handled properly
1346 1350
1347 1351 $ cat > test-glob-backslash.t << EOF
1348 1352 > $ echo 'foo bar \\'
1349 1353 > foo * \ (glob)
1350 1354 > EOF
1351 1355
1352 1356 $ rt test-glob-backslash.t
1353 1357 running 1 tests using 1 parallel processes
1354 1358 .
1355 1359 # Ran 1 tests, 0 skipped, 0 failed.
1356 1360
1357 1361 $ rm -f test-glob-backslash.t
1358 1362
1359 1363 Test globbing of local IP addresses
1360 1364 $ echo 172.16.18.1
1361 1365 $LOCALIP (glob)
1362 1366 $ echo dead:beef::1
1363 1367 $LOCALIP (glob)
1364 1368
1365 1369 Add support for external test formatter
1366 1370 =======================================
1367 1371
1368 1372 $ CUSTOM_TEST_RESULT=basic_test_result "$PYTHON" $TESTDIR/run-tests.py --with-hg=`which hg` -j1 "$@" test-success.t test-failure.t
1369 1373 running 2 tests using 1 parallel processes
1370 1374
1371 1375 # Ran 2 tests, 0 skipped, 0 failed.
1372 1376 ON_START! <__main__.TestSuite tests=[<__main__.TTest testMethod=test-failure.t>, <__main__.TTest testMethod=test-success.t>]>
1373 1377 FAILURE! test-failure.t output changed
1374 1378 SUCCESS! test-success.t
1375 1379 ON_END!
1376 1380
1377 1381 Test reusability for third party tools
1378 1382 ======================================
1379 1383
1380 1384 $ mkdir "$TESTTMP"/anothertests
1381 1385 $ cd "$TESTTMP"/anothertests
1382 1386
1383 1387 test that `run-tests.py` can execute hghave, even if it is not run in the
1384 1388 Mercurial source tree.
1385 1389
1386 1390 $ cat > test-hghave.t <<EOF
1387 1391 > #require true
1388 1392 > $ echo foo
1389 1393 > foo
1390 1394 > EOF
1391 1395 $ rt test-hghave.t
1392 1396 running 1 tests using 1 parallel processes
1393 1397 .
1394 1398 # Ran 1 tests, 0 skipped, 0 failed.
1395 1399
1396 1400 test that RUNTESTDIR refers to the directory in which the currently
1397 1401 running `run-tests.py` is placed.
1398 1402
1399 1403 $ cat > test-runtestdir.t <<EOF
1400 1404 > - $TESTDIR, in which test-run-tests.t is placed
1401 1405 > - \$TESTDIR, in which test-runtestdir.t is placed (expanded at runtime)
1402 1406 > - \$RUNTESTDIR, in which run-tests.py is placed (expanded at runtime)
1403 1407 >
1404 1408 > #if windows
1405 1409 > $ test "\$TESTDIR" = "$TESTTMP\anothertests"
1406 1410 > #else
1407 1411 > $ test "\$TESTDIR" = "$TESTTMP"/anothertests
1408 1412 > #endif
1409 1413 > If this prints a path, that means RUNTESTDIR didn't equal
1410 1414 > TESTDIR as it should have.
1411 1415 > $ test "\$RUNTESTDIR" = "$TESTDIR" || echo "\$RUNTESTDIR"
1412 1416 > This should print the start of check-code. If this passes but the
1413 1417 > previous check failed, that means we found a copy of check-code at whatever
1414 1418 > RUNTESTSDIR ended up containing, even though it doesn't match TESTDIR.
1415 1419 > $ head -n 3 "\$RUNTESTDIR"/../contrib/check-code.py | sed 's@.!.*python3@#!USRBINENVPY@'
1416 1420 > #!USRBINENVPY
1417 1421 > #
1418 1422 > # check-code - a style and portability checker for Mercurial
1419 1423 > EOF
1420 1424 $ rt test-runtestdir.t
1421 1425 running 1 tests using 1 parallel processes
1422 1426 .
1423 1427 # Ran 1 tests, 0 skipped, 0 failed.
1424 1428
1425 1429 #if execbit
1426 1430
1427 1431 test that TESTDIR is included in PATH
1428 1432
1429 1433 $ cat > custom-command.sh <<EOF
1430 1434 > #!/bin/sh
1431 1435 > echo "hello world"
1432 1436 > EOF
1433 1437 $ chmod +x custom-command.sh
1434 1438 $ cat > test-testdir-path.t <<EOF
1435 1439 > $ custom-command.sh
1436 1440 > hello world
1437 1441 > EOF
1438 1442 $ rt test-testdir-path.t
1439 1443 running 1 tests using 1 parallel processes
1440 1444 .
1441 1445 # Ran 1 tests, 0 skipped, 0 failed.
1442 1446
1443 1447 #endif
1444 1448
1445 1449 test support for --allow-slow-tests
1446 1450 $ cat > test-very-slow-test.t <<EOF
1447 1451 > #require slow
1448 1452 > $ echo pass
1449 1453 > pass
1450 1454 > EOF
1451 1455 $ rt test-very-slow-test.t
1452 1456 running 1 tests using 1 parallel processes
1453 1457 s
1454 1458 Skipped test-very-slow-test.t: missing feature: allow slow tests (use --allow-slow-tests)
1455 1459 # Ran 0 tests, 1 skipped, 0 failed.
1456 1460 $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t
1457 1461 running 1 tests using 1 parallel processes
1458 1462 .
1459 1463 # Ran 1 tests, 0 skipped, 0 failed.
1460 1464
1461 1465 support for running a test outside the current directory
1462 1466 $ mkdir nonlocal
1463 1467 $ cat > nonlocal/test-is-not-here.t << EOF
1464 1468 > $ echo pass
1465 1469 > pass
1466 1470 > EOF
1467 1471 $ rt nonlocal/test-is-not-here.t
1468 1472 running 1 tests using 1 parallel processes
1469 1473 .
1470 1474 # Ran 1 tests, 0 skipped, 0 failed.
1471 1475
1472 1476 support for automatically discovering tests if the argument is a folder
1473 1477 $ mkdir tmp && cd tmp
1474 1478
1475 1479 $ cat > test-uno.t << EOF
1476 1480 > $ echo line
1477 1481 > line
1478 1482 > EOF
1479 1483
1480 1484 $ cp test-uno.t test-dos.t
1481 1485 $ cd ..
1482 1486 $ cp -R tmp tmpp
1483 1487 $ cp tmp/test-uno.t test-solo.t
1484 1488
1485 1489 $ rt tmp/ test-solo.t tmpp
1486 1490 running 5 tests using 1 parallel processes
1487 1491 .....
1488 1492 # Ran 5 tests, 0 skipped, 0 failed.
1489 1493 $ rm -rf tmp tmpp
1490 1494
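The run above shows that a directory argument is expanded to the tests it contains (tmp/ and tmpp each contributed two tests). A minimal sketch of that kind of expansion, assuming discovery simply means scanning the directory for files named test-*.t or test-*.py (the pattern and the helper name are assumptions, not a description of run-tests.py internals):

    import os

    def expand_test_args(args):
        # Replace each directory argument with the tests found inside it,
        # keeping plain file arguments untouched.
        tests = []
        for arg in args:
            if os.path.isdir(arg):
                for name in sorted(os.listdir(arg)):
                    if name.startswith("test-") and name.endswith((".t", ".py")):
                        tests.append(os.path.join(arg, name))
            else:
                tests.append(arg)
        return tests

    # expand_test_args(["tmp/", "test-solo.t", "tmpp"]) -> five entries, as in the run above
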
1491 1495 support for running run-tests.py from another directory
1492 1496 $ mkdir tmp && cd tmp
1493 1497
1494 1498 $ cat > useful-file.sh << EOF
1495 1499 > important command
1496 1500 > EOF
1497 1501
1498 1502 $ cat > test-folder.t << EOF
1499 1503 > $ cat \$TESTDIR/useful-file.sh
1500 1504 > important command
1501 1505 > EOF
1502 1506
1503 1507 $ cat > test-folder-fail.t << EOF
1504 1508 > $ cat \$TESTDIR/useful-file.sh
1505 1509 > important commando
1506 1510 > EOF
1507 1511
1508 1512 $ cd ..
1509 1513 $ rt tmp/test-*.t
1510 1514 running 2 tests using 1 parallel processes
1511 1515
1512 1516 --- $TESTTMP/anothertests/tmp/test-folder-fail.t
1513 1517 +++ $TESTTMP/anothertests/tmp/test-folder-fail.t.err
1514 1518 @@ -1,2 +1,2 @@
1515 1519 $ cat $TESTDIR/useful-file.sh
1516 1520 - important commando
1517 1521 + important command
1518 1522
1519 1523 ERROR: test-folder-fail.t output changed
1520 1524 !.
1521 1525 Failed test-folder-fail.t: output changed
1522 1526 # Ran 2 tests, 0 skipped, 1 failed.
1523 1527 python hash seed: * (glob)
1524 1528 [1]
1525 1529
1526 1530 support for bisecting failed tests automatically
1527 1531 $ hg init bisect
1528 1532 $ cd bisect
1529 1533 $ cat >> test-bisect.t <<EOF
1530 1534 > $ echo pass
1531 1535 > pass
1532 1536 > EOF
1533 1537 $ hg add test-bisect.t
1534 1538 $ hg ci -m 'good'
1535 1539 $ cat >> test-bisect.t <<EOF
1536 1540 > $ echo pass
1537 1541 > fail
1538 1542 > EOF
1539 1543 $ hg ci -m 'bad'
1540 1544 $ rt --known-good-rev=0 test-bisect.t
1541 1545 running 1 tests using 1 parallel processes
1542 1546
1543 1547 --- $TESTTMP/anothertests/bisect/test-bisect.t
1544 1548 +++ $TESTTMP/anothertests/bisect/test-bisect.t.err
1545 1549 @@ -1,4 +1,4 @@
1546 1550 $ echo pass
1547 1551 pass
1548 1552 $ echo pass
1549 1553 - fail
1550 1554 + pass
1551 1555
1552 1556 ERROR: test-bisect.t output changed
1553 1557 !
1554 1558 Failed test-bisect.t: output changed
1555 1559 test-bisect.t broken by 72cbf122d116 (bad)
1556 1560 # Ran 1 tests, 0 skipped, 1 failed.
1557 1561 python hash seed: * (glob)
1558 1562 [1]
1559 1563
1560 1564 $ cd ..
1561 1565
1562 1566 support bisecting a separate repo
1563 1567
1564 1568 $ hg init bisect-dependent
1565 1569 $ cd bisect-dependent
1566 1570 $ cat > test-bisect-dependent.t <<EOF
1567 1571 > $ tail -1 \$TESTDIR/../bisect/test-bisect.t
1568 1572 > pass
1569 1573 > EOF
1570 1574 $ hg commit -Am dependent test-bisect-dependent.t
1571 1575
1572 1576 $ rt --known-good-rev=0 test-bisect-dependent.t
1573 1577 running 1 tests using 1 parallel processes
1574 1578
1575 1579 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1576 1580 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1577 1581 @@ -1,2 +1,2 @@
1578 1582 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1579 1583 - pass
1580 1584 + fail
1581 1585
1582 1586 ERROR: test-bisect-dependent.t output changed
1583 1587 !
1584 1588 Failed test-bisect-dependent.t: output changed
1585 1589 Failed to identify failure point for test-bisect-dependent.t
1586 1590 # Ran 1 tests, 0 skipped, 1 failed.
1587 1591 python hash seed: * (glob)
1588 1592 [1]
1589 1593
1590 1594 $ rt --bisect-repo=../test-bisect test-bisect-dependent.t
1591 1595 usage: run-tests.py [options] [tests]
1592 1596 run-tests.py: error: --bisect-repo cannot be used without --known-good-rev
1593 1597 [2]
1594 1598
1595 1599 $ rt --known-good-rev=0 --bisect-repo=../bisect test-bisect-dependent.t
1596 1600 running 1 tests using 1 parallel processes
1597 1601
1598 1602 --- $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t
1599 1603 +++ $TESTTMP/anothertests/bisect-dependent/test-bisect-dependent.t.err
1600 1604 @@ -1,2 +1,2 @@
1601 1605 $ tail -1 $TESTDIR/../bisect/test-bisect.t
1602 1606 - pass
1603 1607 + fail
1604 1608
1605 1609 ERROR: test-bisect-dependent.t output changed
1606 1610 !
1607 1611 Failed test-bisect-dependent.t: output changed
1608 1612 test-bisect-dependent.t broken by 72cbf122d116 (bad)
1609 1613 # Ran 1 tests, 0 skipped, 1 failed.
1610 1614 python hash seed: * (glob)
1611 1615 [1]
1612 1616
1613 1617 $ cd ..
1614 1618
1615 1619 Test that a broken #if statement doesn't break run-tests threading.
1616 1620 ==============================================================
1617 1621 $ mkdir broken
1618 1622 $ cd broken
1619 1623 $ cat > test-broken.t <<EOF
1620 1624 > true
1621 1625 > #if notarealhghavefeature
1622 1626 > $ false
1623 1627 > #endif
1624 1628 > EOF
1625 1629 $ for f in 1 2 3 4 ; do
1626 1630 > cat > test-works-$f.t <<EOF
1627 1631 > This is test case $f
1628 1632 > $ sleep 1
1629 1633 > EOF
1630 1634 > done
1631 1635 $ rt -j 2
1632 1636 running 5 tests using 2 parallel processes
1633 1637 ....
1634 1638 # Ran 5 tests, 0 skipped, 0 failed.
1635 1639 skipped: unknown feature: notarealhghavefeature
1636 1640
1637 1641 $ cd ..
1638 1642 $ rm -rf broken
1639 1643
1640 1644 Test cases in .t files
1641 1645 ======================
1642 1646 $ mkdir cases
1643 1647 $ cd cases
1644 1648 $ cat > test-cases-abc.t <<'EOF'
1645 1649 > #testcases A B C
1646 1650 > $ V=B
1647 1651 > #if A
1648 1652 > $ V=A
1649 1653 > #endif
1650 1654 > #if C
1651 1655 > $ V=C
1652 1656 > #endif
1653 1657 > $ echo $V | sed 's/A/C/'
1654 1658 > C
1655 1659 > #if C
1656 1660 > $ [ $V = C ]
1657 1661 > #endif
1658 1662 > #if A
1659 1663 > $ [ $V = C ]
1660 1664 > [1]
1661 1665 > #endif
1662 1666 > #if no-C
1663 1667 > $ [ $V = C ]
1664 1668 > [1]
1665 1669 > #endif
1666 1670 > $ [ $V = D ]
1667 1671 > [1]
1668 1672 > EOF
1669 1673 $ rt
1670 1674 running 3 tests using 1 parallel processes
1671 1675 .
1672 1676 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1673 1677 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1674 1678 @@ -7,7 +7,7 @@
1675 1679 $ V=C
1676 1680 #endif
1677 1681 $ echo $V | sed 's/A/C/'
1678 1682 - C
1679 1683 + B
1680 1684 #if C
1681 1685 $ [ $V = C ]
1682 1686 #endif
1683 1687
1684 1688 ERROR: test-cases-abc.t#B output changed
1685 1689 !.
1686 1690 Failed test-cases-abc.t#B: output changed
1687 1691 # Ran 3 tests, 0 skipped, 1 failed.
1688 1692 python hash seed: * (glob)
1689 1693 [1]
1690 1694
1691 1695 --restart works
1692 1696
1693 1697 $ rt --restart
1694 1698 running 2 tests using 1 parallel processes
1695 1699
1696 1700 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1697 1701 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1698 1702 @@ -7,7 +7,7 @@
1699 1703 $ V=C
1700 1704 #endif
1701 1705 $ echo $V | sed 's/A/C/'
1702 1706 - C
1703 1707 + B
1704 1708 #if C
1705 1709 $ [ $V = C ]
1706 1710 #endif
1707 1711
1708 1712 ERROR: test-cases-abc.t#B output changed
1709 1713 !.
1710 1714 Failed test-cases-abc.t#B: output changed
1711 1715 # Ran 2 tests, 0 skipped, 1 failed.
1712 1716 python hash seed: * (glob)
1713 1717 [1]
1714 1718
1715 1719 --restart works with outputdir
1716 1720
1717 1721 $ mkdir output
1718 1722 $ mv test-cases-abc.t#B.err output
1719 1723 $ rt --restart --outputdir output
1720 1724 running 2 tests using 1 parallel processes
1721 1725
1722 1726 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1723 1727 +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err
1724 1728 @@ -7,7 +7,7 @@
1725 1729 $ V=C
1726 1730 #endif
1727 1731 $ echo $V | sed 's/A/C/'
1728 1732 - C
1729 1733 + B
1730 1734 #if C
1731 1735 $ [ $V = C ]
1732 1736 #endif
1733 1737
1734 1738 ERROR: test-cases-abc.t#B output changed
1735 1739 !.
1736 1740 Failed test-cases-abc.t#B: output changed
1737 1741 # Ran 2 tests, 0 skipped, 1 failed.
1738 1742 python hash seed: * (glob)
1739 1743 [1]
1740 1744
1741 1745 Test TESTCASE variable
1742 1746
1743 1747 $ cat > test-cases-ab.t <<'EOF'
1744 1748 > $ dostuff() {
1745 1749 > > echo "In case $TESTCASE"
1746 1750 > > }
1747 1751 > #testcases A B
1748 1752 > #if A
1749 1753 > $ dostuff
1750 1754 > In case A
1751 1755 > #endif
1752 1756 > #if B
1753 1757 > $ dostuff
1754 1758 > In case B
1755 1759 > #endif
1756 1760 > EOF
1757 1761 $ rt test-cases-ab.t
1758 1762 running 2 tests using 1 parallel processes
1759 1763 ..
1760 1764 # Ran 2 tests, 0 skipped, 0 failed.
1761 1765
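The transcripts above show the observable contract of #testcases: one .t file produces one test per listed case, the active case name is exposed as $TESTCASE, and #if blocks are kept or dropped depending on that case. A minimal sketch of the expansion step only (greatly simplified and illustrative; not the run-tests.py parser):

    def expand_testcases(path, lines):
        # Return (test name, case name) pairs for every case listed on a
        # '#testcases' line, e.g. 'test-cases-abc.t#B'.
        variants = []
        for line in lines:
            if line.startswith("#testcases "):
                for case in line.split()[1:]:
                    variants.append(("%s#%s" % (path, case), case))
        return variants or [(path, None)]

    # expand_testcases("test-cases-abc.t", ["#testcases A B C"])
    # -> [("test-cases-abc.t#A", "A"), ("test-cases-abc.t#B", "B"), ("test-cases-abc.t#C", "C")]
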
1762 1766 Support running a specific test case
1763 1767
1764 1768 $ rt "test-cases-abc.t#B"
1765 1769 running 1 tests using 1 parallel processes
1766 1770
1767 1771 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1768 1772 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1769 1773 @@ -7,7 +7,7 @@
1770 1774 $ V=C
1771 1775 #endif
1772 1776 $ echo $V | sed 's/A/C/'
1773 1777 - C
1774 1778 + B
1775 1779 #if C
1776 1780 $ [ $V = C ]
1777 1781 #endif
1778 1782
1779 1783 ERROR: test-cases-abc.t#B output changed
1780 1784 !
1781 1785 Failed test-cases-abc.t#B: output changed
1782 1786 # Ran 1 tests, 0 skipped, 1 failed.
1783 1787 python hash seed: * (glob)
1784 1788 [1]
1785 1789
1786 1790 Support running multiple test cases in the same file
1787 1791
1788 1792 $ rt test-cases-abc.t#B test-cases-abc.t#C
1789 1793 running 2 tests using 1 parallel processes
1790 1794
1791 1795 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1792 1796 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1793 1797 @@ -7,7 +7,7 @@
1794 1798 $ V=C
1795 1799 #endif
1796 1800 $ echo $V | sed 's/A/C/'
1797 1801 - C
1798 1802 + B
1799 1803 #if C
1800 1804 $ [ $V = C ]
1801 1805 #endif
1802 1806
1803 1807 ERROR: test-cases-abc.t#B output changed
1804 1808 !.
1805 1809 Failed test-cases-abc.t#B: output changed
1806 1810 # Ran 2 tests, 0 skipped, 1 failed.
1807 1811 python hash seed: * (glob)
1808 1812 [1]
1809 1813
1810 1814 Support ignoring invalid test cases
1811 1815
1812 1816 $ rt test-cases-abc.t#B test-cases-abc.t#D
1813 1817 running 1 tests using 1 parallel processes
1814 1818
1815 1819 --- $TESTTMP/anothertests/cases/test-cases-abc.t
1816 1820 +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err
1817 1821 @@ -7,7 +7,7 @@
1818 1822 $ V=C
1819 1823 #endif
1820 1824 $ echo $V | sed 's/A/C/'
1821 1825 - C
1822 1826 + B
1823 1827 #if C
1824 1828 $ [ $V = C ]
1825 1829 #endif
1826 1830
1827 1831 ERROR: test-cases-abc.t#B output changed
1828 1832 !
1829 1833 Failed test-cases-abc.t#B: output changed
1830 1834 # Ran 1 tests, 0 skipped, 1 failed.
1831 1835 python hash seed: * (glob)
1832 1836 [1]
1833 1837
1834 1838 Support running test cases with complex names
1835 1839
1836 1840 $ cat > test-cases-advanced-cases.t <<'EOF'
1837 1841 > #testcases simple case-with-dashes casewith_-.chars
1838 1842 > $ echo $TESTCASE
1839 1843 > simple
1840 1844 > EOF
1841 1845
1842 1846 $ cat test-cases-advanced-cases.t
1843 1847 #testcases simple case-with-dashes casewith_-.chars
1844 1848 $ echo $TESTCASE
1845 1849 simple
1846 1850
1847 1851 $ rt test-cases-advanced-cases.t
1848 1852 running 3 tests using 1 parallel processes
1849 1853
1850 1854 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1851 1855 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1852 1856 @@ -1,3 +1,3 @@
1853 1857 #testcases simple case-with-dashes casewith_-.chars
1854 1858 $ echo $TESTCASE
1855 1859 - simple
1856 1860 + case-with-dashes
1857 1861
1858 1862 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1859 1863 !
1860 1864 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1861 1865 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1862 1866 @@ -1,3 +1,3 @@
1863 1867 #testcases simple case-with-dashes casewith_-.chars
1864 1868 $ echo $TESTCASE
1865 1869 - simple
1866 1870 + casewith_-.chars
1867 1871
1868 1872 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1869 1873 !.
1870 1874 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1871 1875 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1872 1876 # Ran 3 tests, 0 skipped, 2 failed.
1873 1877 python hash seed: * (glob)
1874 1878 [1]
1875 1879
1876 1880 $ rt "test-cases-advanced-cases.t#case-with-dashes"
1877 1881 running 1 tests using 1 parallel processes
1878 1882
1879 1883 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1880 1884 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err
1881 1885 @@ -1,3 +1,3 @@
1882 1886 #testcases simple case-with-dashes casewith_-.chars
1883 1887 $ echo $TESTCASE
1884 1888 - simple
1885 1889 + case-with-dashes
1886 1890
1887 1891 ERROR: test-cases-advanced-cases.t#case-with-dashes output changed
1888 1892 !
1889 1893 Failed test-cases-advanced-cases.t#case-with-dashes: output changed
1890 1894 # Ran 1 tests, 0 skipped, 1 failed.
1891 1895 python hash seed: * (glob)
1892 1896 [1]
1893 1897
1894 1898 $ rt "test-cases-advanced-cases.t#casewith_-.chars"
1895 1899 running 1 tests using 1 parallel processes
1896 1900
1897 1901 --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t
1898 1902 +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err
1899 1903 @@ -1,3 +1,3 @@
1900 1904 #testcases simple case-with-dashes casewith_-.chars
1901 1905 $ echo $TESTCASE
1902 1906 - simple
1903 1907 + casewith_-.chars
1904 1908
1905 1909 ERROR: test-cases-advanced-cases.t#casewith_-.chars output changed
1906 1910 !
1907 1911 Failed test-cases-advanced-cases.t#casewith_-.chars: output changed
1908 1912 # Ran 1 tests, 0 skipped, 1 failed.
1909 1913 python hash seed: * (glob)
1910 1914 [1]
1911 1915
1912 1916 Test automatic pattern replacement
1913 1917 ==================================
1914 1918
1915 1919 $ cat << EOF >> common-pattern.py
1916 1920 > substitutions = [
1917 1921 > (br'foo-(.*)\\b',
1918 1922 > br'\$XXX=\\1\$'),
1919 1923 > (br'bar\\n',
1920 1924 > br'\$YYY$\\n'),
1921 1925 > ]
1922 1926 > EOF
1923 1927
1924 1928 $ cat << EOF >> test-substitution.t
1925 1929 > $ echo foo-12
1926 1930 > \$XXX=12$
1927 1931 > $ echo foo-42
1928 1932 > \$XXX=42$
1929 1933 > $ echo bar prior
1930 1934 > bar prior
1931 1935 > $ echo lastbar
1932 1936 > last\$YYY$
1933 1937 > $ echo foo-bar foo-baz
1934 1938 > EOF
1935 1939
1936 1940 $ rt test-substitution.t
1937 1941 running 1 tests using 1 parallel processes
1938 1942
1939 1943 --- $TESTTMP/anothertests/cases/test-substitution.t
1940 1944 +++ $TESTTMP/anothertests/cases/test-substitution.t.err
1941 1945 @@ -7,3 +7,4 @@
1942 1946 $ echo lastbar
1943 1947 last$YYY$
1944 1948 $ echo foo-bar foo-baz
1945 1949 + $XXX=bar foo-baz$
1946 1950
1947 1951 ERROR: test-substitution.t output changed
1948 1952 !
1949 1953 Failed test-substitution.t: output changed
1950 1954 # Ran 1 tests, 0 skipped, 1 failed.
1951 1955 python hash seed: * (glob)
1952 1956 [1]
1953 1957
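The common-pattern.py file above supplies a list of (pattern, replacement) byte-regex pairs that are applied to the raw test output before it is compared with the expected output (the doubled backslashes in the heredoc are shell escaping; the written file contains single backslashes). A minimal sketch of applying such a list with re.sub (the function name is illustrative):

    import re

    substitutions = [
        (br"foo-(.*)\b", br"$XXX=\1$"),
        (br"bar\n", br"$YYY$\n"),
    ]

    def apply_substitutions(line, subs=substitutions):
        # Rewrite one line of raw output; the rewritten form is what the
        # expected output in the .t file has to match.
        for pattern, replacement in subs:
            line = re.sub(pattern, replacement, line)
        return line

    # apply_substitutions(b"foo-12\n") -> b"$XXX=12$\n"
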
1954 1958 --extra-config-opt works
1955 1959
1956 1960 $ cat << EOF >> test-config-opt.t
1957 1961 > $ hg init test-config-opt
1958 1962 > $ hg -R test-config-opt purge
1959 1963 > $ echo "HGTESTEXTRAEXTENSIONS: \$HGTESTEXTRAEXTENSIONS"
1960 1964 > HGTESTEXTRAEXTENSIONS: purge
1961 1965 > EOF
1962 1966
1963 1967 $ rt --extra-config-opt extensions.purge= \
1964 1968 > --extra-config-opt not.an.extension=True test-config-opt.t
1965 1969 running 1 tests using 1 parallel processes
1966 1970 .
1967 1971 # Ran 1 tests, 0 skipped, 0 failed.
1968 1972
1969 1973 Test conditional output matching
1970 1974 ================================
1971 1975
1972 1976 $ cat << EOF >> test-conditional-matching.t
1973 1977 > #testcases foo bar
1974 1978 > $ echo richtig
1975 1979 > richtig (true !)
1976 1980 > $ echo falsch
1977 1981 > falsch (false !)
1978 1982 > #if foo
1979 1983 > $ echo arthur
1980 1984 > arthur (bar !)
1981 1985 > #endif
1982 1986 > $ echo celeste
1983 1987 > celeste (foo !)
1984 1988 > $ echo zephir
1985 1989 > zephir (bar !)
1986 1990 > EOF
1987 1991
1988 1992 $ rt test-conditional-matching.t
1989 1993 running 2 tests using 1 parallel processes
1990 1994
1991 1995 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
1992 1996 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#bar.err
1993 1997 @@ -3,11 +3,13 @@
1994 1998 richtig (true !)
1995 1999 $ echo falsch
1996 2000 falsch (false !)
1997 2001 + falsch
1998 2002 #if foo
1999 2003 $ echo arthur
2000 2004 arthur \(bar !\) (re)
2001 2005 #endif
2002 2006 $ echo celeste
2003 2007 celeste \(foo !\) (re)
2004 2008 + celeste
2005 2009 $ echo zephir
2006 2010 zephir \(bar !\) (re)
2007 2011
2008 2012 ERROR: test-conditional-matching.t#bar output changed
2009 2013 !
2010 2014 --- $TESTTMP/anothertests/cases/test-conditional-matching.t
2011 2015 +++ $TESTTMP/anothertests/cases/test-conditional-matching.t#foo.err
2012 2016 @@ -3,11 +3,14 @@
2013 2017 richtig (true !)
2014 2018 $ echo falsch
2015 2019 falsch (false !)
2016 2020 + falsch
2017 2021 #if foo
2018 2022 $ echo arthur
2019 2023 arthur \(bar !\) (re)
2020 2024 + arthur
2021 2025 #endif
2022 2026 $ echo celeste
2023 2027 celeste \(foo !\) (re)
2024 2028 $ echo zephir
2025 2029 zephir \(bar !\) (re)
2026 2030 + zephir
2027 2031
2028 2032 ERROR: test-conditional-matching.t#foo output changed
2029 2033 !
2030 2034 Failed test-conditional-matching.t#bar: output changed
2031 2035 Failed test-conditional-matching.t#foo: output changed
2032 2036 # Ran 2 tests, 0 skipped, 2 failed.
2033 2037 python hash seed: * (glob)
2034 2038 [1]