py3: remove xrange() compatibility code...
Manuel Jacob
r50180:56f98406 default
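For context, the pattern this changeset deletes is the Python 2/3 xrange shim: on Python 2 the builtin xrange exists, while on Python 3 the NameError branch aliases it to range. A minimal before/after sketch, not verbatim from any one of the touched files:

# Before: compatibility shim kept while Python 2 was still supported.
try:
    xrange
except NameError:
    xrange = range

for i in xrange(10):
    pass

# After: Python 3 only. range() is already lazy on Python 3, so the shim
# and the xrange name can simply go away.
for i in range(10):
    pass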
@@ -1,73 +1,68 @@
1 1 # bruterebase.py - brute force rebase testing
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 from mercurial import (
10 10 error,
11 11 registrar,
12 12 revsetlang,
13 13 )
14 14
15 15 from hgext import rebase
16 16
17 try:
18 xrange
19 except NameError:
20 xrange = range
21
22 17 cmdtable = {}
23 18 command = registrar.command(cmdtable)
24 19
25 20
26 21 @command(b'debugbruterebase')
27 22 def debugbruterebase(ui, repo, source, dest):
28 23 """for every non-empty subset of source, run rebase -r subset -d dest
29 24
30 25 Print one line summary for each subset. Assume obsstore is enabled.
31 26 """
32 27 srevs = list(repo.revs(source))
33 28
34 29 with repo.wlock(), repo.lock():
35 30 repolen = len(repo)
36 31 cl = repo.changelog
37 32
38 33 def getdesc(rev):
39 34 result = cl.changelogrevision(rev).description
40 35 if rev >= repolen:
41 36 result += b"'"
42 37 return result
43 38
44 for i in xrange(1, 2 ** len(srevs)):
39 for i in range(1, 2 ** len(srevs)):
45 40 subset = [rev for j, rev in enumerate(srevs) if i & (1 << j) != 0]
46 41 spec = revsetlang.formatspec(b'%ld', subset)
47 42 tr = repo.transaction(b'rebase')
48 43 tr._report = lambda x: 0 # hide "transaction abort"
49 44
50 45 with ui.silent():
51 46 try:
52 47 rebase.rebase(ui, repo, dest=dest, rev=[spec])
53 48 except error.Abort as ex:
54 49 summary = b'ABORT: %s' % ex.message
55 50 except Exception as ex:
56 51 summary = b'CRASH: %s' % ex
57 52 else:
58 53 # short summary about new nodes
59 54 cl = repo.changelog
60 55 descs = []
61 for rev in xrange(repolen, len(repo)):
56 for rev in range(repolen, len(repo)):
62 57 desc = b'%s:' % getdesc(rev)
63 58 for prev in cl.parentrevs(rev):
64 59 if prev > -1:
65 60 desc += getdesc(prev)
66 61 descs.append(desc)
67 62 descs.sort()
68 63 summary = b' '.join(descs)
69 64 repo.vfs.tryunlink(b'rebasestate')
70 65
71 66 subsetdesc = b''.join(getdesc(rev) for rev in subset)
72 67 ui.write(b'%s: %s\n' % (subsetdesc.rjust(len(srevs)), summary))
73 68 tr.abort()
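The debugbruterebase loop above enumerates every non-empty subset of srevs by treating the loop counter as a bitmask: bit j of i decides whether srevs[j] is included. A self-contained sketch of that technique, with made-up values in the usage example:

def nonempty_subsets(items):
    # Counter values 1 .. 2**n - 1 each select a distinct non-empty
    # subset: bit j set means items[j] is included.
    n = len(items)
    for mask in range(1, 2 ** n):
        yield [item for j, item in enumerate(items) if mask & (1 << j)]

# Hypothetical usage with three revision numbers:
for subset in nonempty_subsets([0, 1, 2]):
    print(subset)
# prints [0], [1], [0, 1], [2], [0, 2], [1, 2], [0, 1, 2]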
@@ -1,158 +1,157 @@
1 1 #!/usr/bin/env python3
2 2
3 3 # fsmonitor-run-tests.py - Run Mercurial tests with fsmonitor enabled
4 4 #
5 5 # Copyright 2017 Facebook, Inc.
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9 #
10 10 # This is a wrapper around run-tests.py that spins up an isolated instance of
11 11 # Watchman and runs the Mercurial tests against it. This ensures that the global
12 12 # version of Watchman isn't affected by anything this test does.
13 13
14 14
15 15 import argparse
16 16 import contextlib
17 17 import json
18 18 import os
19 19 import shutil
20 20 import subprocess
21 21 import sys
22 22 import tempfile
23 23 import uuid
24 24
25 25 osenvironb = getattr(os, 'environb', os.environ)
26 26
27 27 if sys.version_info > (3, 5, 0):
28 28 PYTHON3 = True
29 xrange = range # we use xrange in one place, and we'd rather not use range
30 29
31 30 def _sys2bytes(p):
32 31 return p.encode('utf-8')
33 32
34 33
35 34 elif sys.version_info >= (3, 0, 0):
36 35 print(
37 36 '%s is only supported on Python 3.5+ and 2.7, not %s'
38 37 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
39 38 )
40 39 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
41 40 else:
42 41 PYTHON3 = False
43 42
44 43 # In python 2.x, path operations are generally done using
45 44 # bytestrings by default, so we don't have to do any extra
46 45 # fiddling there. We define the wrapper functions anyway just to
47 46 # help keep code consistent between platforms.
48 47 def _sys2bytes(p):
49 48 return p
50 49
51 50
52 51 def getparser():
53 52 """Obtain the argument parser used by the CLI."""
54 53 parser = argparse.ArgumentParser(
55 54 description='Run tests with fsmonitor enabled.',
56 55 epilog='Unrecognized options are passed to run-tests.py.',
57 56 )
58 57 # - keep these sorted
59 58 # - none of these options should conflict with any in run-tests.py
60 59 parser.add_argument(
61 60 '--keep-fsmonitor-tmpdir',
62 61 action='store_true',
63 62 help='keep temporary directory with fsmonitor state',
64 63 )
65 64 parser.add_argument(
66 65 '--watchman',
67 66 help='location of watchman binary (default: watchman in PATH)',
68 67 default='watchman',
69 68 )
70 69
71 70 return parser
72 71
73 72
74 73 @contextlib.contextmanager
75 74 def watchman(args):
76 75 basedir = tempfile.mkdtemp(prefix='hg-fsmonitor')
77 76 try:
78 77 # Much of this configuration is borrowed from Watchman's test harness.
79 78 cfgfile = os.path.join(basedir, 'config.json')
80 79 # TODO: allow setting a config
81 80 with open(cfgfile, 'w') as f:
82 81 f.write(json.dumps({}))
83 82
84 83 logfile = os.path.join(basedir, 'log')
85 84 clilogfile = os.path.join(basedir, 'cli-log')
86 85 if os.name == 'nt':
87 86 sockfile = '\\\\.\\pipe\\watchman-test-%s' % uuid.uuid4().hex
88 87 else:
89 88 sockfile = os.path.join(basedir, 'sock')
90 89 pidfile = os.path.join(basedir, 'pid')
91 90 statefile = os.path.join(basedir, 'state')
92 91
93 92 argv = [
94 93 args.watchman,
95 94 '--sockname',
96 95 sockfile,
97 96 '--logfile',
98 97 logfile,
99 98 '--pidfile',
100 99 pidfile,
101 100 '--statefile',
102 101 statefile,
103 102 '--foreground',
104 103 '--log-level=2', # debug logging for watchman
105 104 ]
106 105
107 106 envb = osenvironb.copy()
108 107 envb[b'WATCHMAN_CONFIG_FILE'] = _sys2bytes(cfgfile)
109 108 with open(clilogfile, 'wb') as f:
110 109 proc = subprocess.Popen(
111 110 argv, env=envb, stdin=None, stdout=f, stderr=f
112 111 )
113 112 try:
114 113 yield sockfile
115 114 finally:
116 115 proc.terminate()
117 116 proc.kill()
118 117 finally:
119 118 if args.keep_fsmonitor_tmpdir:
120 119 print('fsmonitor dir available at %s' % basedir)
121 120 else:
122 121 shutil.rmtree(basedir, ignore_errors=True)
123 122
124 123
125 124 def run():
126 125 parser = getparser()
127 126 args, runtestsargv = parser.parse_known_args()
128 127
129 128 with watchman(args) as sockfile:
130 129 osenvironb[b'WATCHMAN_SOCK'] = _sys2bytes(sockfile)
131 130 # Indicate to hghave that we're running with fsmonitor enabled.
132 131 osenvironb[b'HGFSMONITOR_TESTS'] = b'1'
133 132
134 133 runtestdir = os.path.dirname(__file__)
135 134 runtests = os.path.join(runtestdir, 'run-tests.py')
136 135 blacklist = os.path.join(runtestdir, 'blacklists', 'fsmonitor')
137 136
138 137 runtestsargv.insert(0, runtests)
139 138 runtestsargv.extend(
140 139 [
141 140 '--extra-config',
142 141 'extensions.fsmonitor=',
143 142 # specify fsmonitor.mode=paranoid always in order to force
144 143 # fsmonitor extension execute "paranoid" code path
145 144 #
146 145 # TODO: make fsmonitor-run-tests.py accept specific options
147 146 '--extra-config',
148 147 'fsmonitor.mode=paranoid',
149 148 '--blacklist',
150 149 blacklist,
151 150 ]
152 151 )
153 152
154 153 return subprocess.call(runtestsargv)
155 154
156 155
157 156 if __name__ == '__main__':
158 157 sys.exit(run())
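The watchman() helper above is a generator-based context manager that starts an external process, yields a handle while the with-block runs, and always cleans up on exit, whether or not the body raised. A minimal sketch of the same pattern, assuming a plain 'sleep 60' command as a stand-in for the real daemon:

import contextlib
import shutil
import subprocess
import tempfile

@contextlib.contextmanager
def background_process(argv):
    # Isolated scratch directory for the process, mirroring the mkdtemp above.
    basedir = tempfile.mkdtemp(prefix='example-')
    proc = subprocess.Popen(argv, cwd=basedir)
    try:
        yield proc  # caller's with-block runs while the process is alive
    finally:
        # Runs even if the with-block raised: stop the process, drop the dir.
        proc.terminate()
        proc.wait()
        shutil.rmtree(basedir, ignore_errors=True)

# Usage: the process exists inside the block and is gone afterwards.
with background_process(['sleep', '60']) as proc:
    print('running with pid', proc.pid)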
@@ -1,4009 +1,4006 @@
1 1 #!/usr/bin/env python3
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46
47 47 import argparse
48 48 import collections
49 49 import contextlib
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import functools
54 54 import json
55 55 import multiprocessing
56 56 import os
57 57 import platform
58 58 import queue
59 59 import random
60 60 import re
61 61 import shlex
62 62 import shutil
63 63 import signal
64 64 import socket
65 65 import subprocess
66 66 import sys
67 67 import sysconfig
68 68 import tempfile
69 69 import threading
70 70 import time
71 71 import unittest
72 72 import uuid
73 73 import xml.dom.minidom as minidom
74 74
75 75 if sys.version_info < (3, 5, 0):
76 76 print(
77 77 '%s is only supported on Python 3.5+, not %s'
78 78 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
79 79 )
80 80 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
81 81
82 82 WINDOWS = os.name == r'nt'
83 83 shellquote = shlex.quote
84 84
85 85
86 86 processlock = threading.Lock()
87 87
88 88 pygmentspresent = False
89 89 try: # is pygments installed
90 90 import pygments
91 91 import pygments.lexers as lexers
92 92 import pygments.lexer as lexer
93 93 import pygments.formatters as formatters
94 94 import pygments.token as token
95 95 import pygments.style as style
96 96
97 97 if WINDOWS:
98 98 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
99 99 sys.path.append(hgpath)
100 100 try:
101 101 from mercurial import win32 # pytype: disable=import-error
102 102
103 103 # Don't check the result code because it fails on heptapod, but
104 104 # something is able to convert to color anyway.
105 105 win32.enablevtmode()
106 106 finally:
107 107 sys.path = sys.path[:-1]
108 108
109 109 pygmentspresent = True
110 110 difflexer = lexers.DiffLexer()
111 111 terminal256formatter = formatters.Terminal256Formatter()
112 112 except ImportError:
113 113 pass
114 114
115 115 if pygmentspresent:
116 116
117 117 class TestRunnerStyle(style.Style):
118 118 default_style = ""
119 119 skipped = token.string_to_tokentype("Token.Generic.Skipped")
120 120 failed = token.string_to_tokentype("Token.Generic.Failed")
121 121 skippedname = token.string_to_tokentype("Token.Generic.SName")
122 122 failedname = token.string_to_tokentype("Token.Generic.FName")
123 123 styles = {
124 124 skipped: '#e5e5e5',
125 125 skippedname: '#00ffff',
126 126 failed: '#7f0000',
127 127 failedname: '#ff0000',
128 128 }
129 129
130 130 class TestRunnerLexer(lexer.RegexLexer):
131 131 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
132 132 tokens = {
133 133 'root': [
134 134 (r'^Skipped', token.Generic.Skipped, 'skipped'),
135 135 (r'^Failed ', token.Generic.Failed, 'failed'),
136 136 (r'^ERROR: ', token.Generic.Failed, 'failed'),
137 137 ],
138 138 'skipped': [
139 139 (testpattern, token.Generic.SName),
140 140 (r':.*', token.Generic.Skipped),
141 141 ],
142 142 'failed': [
143 143 (testpattern, token.Generic.FName),
144 144 (r'(:| ).*', token.Generic.Failed),
145 145 ],
146 146 }
147 147
148 148 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
149 149 runnerlexer = TestRunnerLexer()
150 150
151 151 origenviron = os.environ.copy()
152 152
153 153
154 xrange = range # we use xrange in one place, and we'd rather not use range
155
156
157 154 def _sys2bytes(p):
158 155 if p is None:
159 156 return p
160 157 return p.encode('utf-8')
161 158
162 159
163 160 def _bytes2sys(p):
164 161 if p is None:
165 162 return p
166 163 return p.decode('utf-8')
167 164
168 165
169 166 osenvironb = getattr(os, 'environb', None)
170 167 if osenvironb is None:
171 168 # Windows lacks os.environb, for instance. A proxy over the real thing
172 169 # instead of a copy allows the environment to be updated via bytes on
173 170 # all platforms.
174 171 class environbytes:
175 172 def __init__(self, strenv):
176 173 self.__len__ = strenv.__len__
177 174 self.clear = strenv.clear
178 175 self._strenv = strenv
179 176
180 177 def __getitem__(self, k):
181 178 v = self._strenv.__getitem__(_bytes2sys(k))
182 179 return _sys2bytes(v)
183 180
184 181 def __setitem__(self, k, v):
185 182 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
186 183
187 184 def __delitem__(self, k):
188 185 self._strenv.__delitem__(_bytes2sys(k))
189 186
190 187 def __contains__(self, k):
191 188 return self._strenv.__contains__(_bytes2sys(k))
192 189
193 190 def __iter__(self):
194 191 return iter([_sys2bytes(k) for k in iter(self._strenv)])
195 192
196 193 def get(self, k, default=None):
197 194 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
198 195 return _sys2bytes(v)
199 196
200 197 def pop(self, k, default=None):
201 198 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
202 199 return _sys2bytes(v)
203 200
204 201 osenvironb = environbytes(os.environ)
205 202
206 203 getcwdb = getattr(os, 'getcwdb')
207 204 if not getcwdb or WINDOWS:
208 205 getcwdb = lambda: _sys2bytes(os.getcwd())
209 206
210 207
211 208 if WINDOWS:
212 209 _getcwdb = getcwdb
213 210
214 211 def getcwdb():
215 212 cwd = _getcwdb()
216 213 if re.match(b'^[a-z]:', cwd):
217 214 # os.getcwd() is inconsistent on the capitalization of the drive
218 215 # letter, so adjust it. see https://bugs.python.org/issue40368
219 216 cwd = cwd[0:1].upper() + cwd[1:]
220 217 return cwd
221 218
222 219
223 220 # For Windows support
224 221 wifexited = getattr(os, "WIFEXITED", lambda x: False)
225 222
226 223 # Whether to use IPv6
227 224 def checksocketfamily(name, port=20058):
228 225 """return true if we can listen on localhost using family=name
229 226
230 227 name should be either 'AF_INET', or 'AF_INET6'.
231 228 port being used is okay - EADDRINUSE is considered as successful.
232 229 """
233 230 family = getattr(socket, name, None)
234 231 if family is None:
235 232 return False
236 233 try:
237 234 s = socket.socket(family, socket.SOCK_STREAM)
238 235 s.bind(('localhost', port))
239 236 s.close()
240 237 return True
241 238 except (socket.error, OSError) as exc:
242 239 if exc.errno == errno.EADDRINUSE:
243 240 return True
244 241 elif exc.errno in (
245 242 errno.EADDRNOTAVAIL,
246 243 errno.EPROTONOSUPPORT,
247 244 errno.EAFNOSUPPORT,
248 245 ):
249 246 return False
250 247 else:
251 248 raise
252 249 else:
253 250 return False
254 251
255 252
256 253 # useipv6 will be set by parseargs
257 254 useipv6 = None
258 255
259 256
260 257 def checkportisavailable(port):
261 258 """return true if a port seems free to bind on localhost"""
262 259 if useipv6:
263 260 family = socket.AF_INET6
264 261 else:
265 262 family = socket.AF_INET
266 263 try:
267 264 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
268 265 s.bind(('localhost', port))
269 266 return True
270 267 except socket.error as exc:
271 268 if WINDOWS and exc.errno == errno.WSAEACCES:
272 269 return False
273 270 # TODO: make a proper exception handler after dropping py2. This
274 271 # works because socket.error is an alias for OSError on py3,
275 272 # which is also the baseclass of PermissionError.
276 273 elif isinstance(exc, PermissionError):
277 274 return False
278 275 if exc.errno not in (
279 276 errno.EADDRINUSE,
280 277 errno.EADDRNOTAVAIL,
281 278 errno.EPROTONOSUPPORT,
282 279 ):
283 280 raise
284 281 return False
285 282
286 283
287 284 closefds = os.name == 'posix'
288 285
289 286
290 287 def Popen4(cmd, wd, timeout, env=None):
291 288 processlock.acquire()
292 289 p = subprocess.Popen(
293 290 _bytes2sys(cmd),
294 291 shell=True,
295 292 bufsize=-1,
296 293 cwd=_bytes2sys(wd),
297 294 env=env,
298 295 close_fds=closefds,
299 296 stdin=subprocess.PIPE,
300 297 stdout=subprocess.PIPE,
301 298 stderr=subprocess.STDOUT,
302 299 )
303 300 processlock.release()
304 301
305 302 p.fromchild = p.stdout
306 303 p.tochild = p.stdin
307 304 p.childerr = p.stderr
308 305
309 306 p.timeout = False
310 307 if timeout:
311 308
312 309 def t():
313 310 start = time.time()
314 311 while time.time() - start < timeout and p.returncode is None:
315 312 time.sleep(0.1)
316 313 p.timeout = True
317 314 vlog('# Timeout reached for process %d' % p.pid)
318 315 if p.returncode is None:
319 316 terminate(p)
320 317
321 318 threading.Thread(target=t).start()
322 319
323 320 return p
324 321
325 322
326 323 if sys.executable:
327 324 sysexecutable = sys.executable
328 325 elif os.environ.get('PYTHONEXECUTABLE'):
329 326 sysexecutable = os.environ['PYTHONEXECUTABLE']
330 327 elif os.environ.get('PYTHON'):
331 328 sysexecutable = os.environ['PYTHON']
332 329 else:
333 330 raise AssertionError('Could not find Python interpreter')
334 331
335 332 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
336 333 IMPL_PATH = b'PYTHONPATH'
337 334 if 'java' in sys.platform:
338 335 IMPL_PATH = b'JYTHONPATH'
339 336
340 337 default_defaults = {
341 338 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
342 339 'timeout': ('HGTEST_TIMEOUT', 360),
343 340 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
344 341 'port': ('HGTEST_PORT', 20059),
345 342 'shell': ('HGTEST_SHELL', 'sh'),
346 343 }
347 344
348 345 defaults = default_defaults.copy()
349 346
350 347
351 348 def canonpath(path):
352 349 return os.path.realpath(os.path.expanduser(path))
353 350
354 351
355 352 def which(exe):
356 353 # shutil.which only accept bytes from 3.8
357 354 cmd = _bytes2sys(exe)
358 355 real_exec = shutil.which(cmd)
359 356 return _sys2bytes(real_exec)
360 357
361 358
362 359 def parselistfiles(files, listtype, warn=True):
363 360 entries = dict()
364 361 for filename in files:
365 362 try:
366 363 path = os.path.expanduser(os.path.expandvars(filename))
367 364 f = open(path, "rb")
368 365 except IOError as err:
369 366 if err.errno != errno.ENOENT:
370 367 raise
371 368 if warn:
372 369 print("warning: no such %s file: %s" % (listtype, filename))
373 370 continue
374 371
375 372 for line in f.readlines():
376 373 line = line.split(b'#', 1)[0].strip()
377 374 if line:
378 375 # Ensure path entries are compatible with os.path.relpath()
379 376 entries[os.path.normpath(line)] = filename
380 377
381 378 f.close()
382 379 return entries
383 380
384 381
385 382 def parsettestcases(path):
386 383 """read a .t test file, return a set of test case names
387 384
388 385 If path does not exist, return an empty set.
389 386 """
390 387 cases = []
391 388 try:
392 389 with open(path, 'rb') as f:
393 390 for l in f:
394 391 if l.startswith(b'#testcases '):
395 392 cases.append(sorted(l[11:].split()))
396 393 except IOError as ex:
397 394 if ex.errno != errno.ENOENT:
398 395 raise
399 396 return cases
400 397
401 398
402 399 def getparser():
403 400 """Obtain the OptionParser used by the CLI."""
404 401 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
405 402
406 403 selection = parser.add_argument_group('Test Selection')
407 404 selection.add_argument(
408 405 '--allow-slow-tests',
409 406 action='store_true',
410 407 help='allow extremely slow tests',
411 408 )
412 409 selection.add_argument(
413 410 "--blacklist",
414 411 action="append",
415 412 help="skip tests listed in the specified blacklist file",
416 413 )
417 414 selection.add_argument(
418 415 "--changed",
419 416 help="run tests that are changed in parent rev or working directory",
420 417 )
421 418 selection.add_argument(
422 419 "-k", "--keywords", help="run tests matching keywords"
423 420 )
424 421 selection.add_argument(
425 422 "-r", "--retest", action="store_true", help="retest failed tests"
426 423 )
427 424 selection.add_argument(
428 425 "--test-list",
429 426 action="append",
430 427 help="read tests to run from the specified file",
431 428 )
432 429 selection.add_argument(
433 430 "--whitelist",
434 431 action="append",
435 432 help="always run tests listed in the specified whitelist file",
436 433 )
437 434 selection.add_argument(
438 435 'tests', metavar='TESTS', nargs='*', help='Tests to run'
439 436 )
440 437
441 438 harness = parser.add_argument_group('Test Harness Behavior')
442 439 harness.add_argument(
443 440 '--bisect-repo',
444 441 metavar='bisect_repo',
445 442 help=(
446 443 "Path of a repo to bisect. Use together with " "--known-good-rev"
447 444 ),
448 445 )
449 446 harness.add_argument(
450 447 "-d",
451 448 "--debug",
452 449 action="store_true",
453 450 help="debug mode: write output of test scripts to console"
454 451 " rather than capturing and diffing it (disables timeout)",
455 452 )
456 453 harness.add_argument(
457 454 "-f",
458 455 "--first",
459 456 action="store_true",
460 457 help="exit on the first test failure",
461 458 )
462 459 harness.add_argument(
463 460 "-i",
464 461 "--interactive",
465 462 action="store_true",
466 463 help="prompt to accept changed output",
467 464 )
468 465 harness.add_argument(
469 466 "-j",
470 467 "--jobs",
471 468 type=int,
472 469 help="number of jobs to run in parallel"
473 470 " (default: $%s or %d)" % defaults['jobs'],
474 471 )
475 472 harness.add_argument(
476 473 "--keep-tmpdir",
477 474 action="store_true",
478 475 help="keep temporary directory after running tests",
479 476 )
480 477 harness.add_argument(
481 478 '--known-good-rev',
482 479 metavar="known_good_rev",
483 480 help=(
484 481 "Automatically bisect any failures using this "
485 482 "revision as a known-good revision."
486 483 ),
487 484 )
488 485 harness.add_argument(
489 486 "--list-tests",
490 487 action="store_true",
491 488 help="list tests instead of running them",
492 489 )
493 490 harness.add_argument(
494 491 "--loop", action="store_true", help="loop tests repeatedly"
495 492 )
496 493 harness.add_argument(
497 494 '--random', action="store_true", help='run tests in random order'
498 495 )
499 496 harness.add_argument(
500 497 '--order-by-runtime',
501 498 action="store_true",
502 499 help='run slowest tests first, according to .testtimes',
503 500 )
504 501 harness.add_argument(
505 502 "-p",
506 503 "--port",
507 504 type=int,
508 505 help="port on which servers should listen"
509 506 " (default: $%s or %d)" % defaults['port'],
510 507 )
511 508 harness.add_argument(
512 509 '--profile-runner',
513 510 action='store_true',
514 511 help='run statprof on run-tests',
515 512 )
516 513 harness.add_argument(
517 514 "-R", "--restart", action="store_true", help="restart at last error"
518 515 )
519 516 harness.add_argument(
520 517 "--runs-per-test",
521 518 type=int,
522 519 dest="runs_per_test",
523 520 help="run each test N times (default=1)",
524 521 default=1,
525 522 )
526 523 harness.add_argument(
527 524 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
528 525 )
529 526 harness.add_argument(
530 527 '--showchannels', action='store_true', help='show scheduling channels'
531 528 )
532 529 harness.add_argument(
533 530 "--slowtimeout",
534 531 type=int,
535 532 help="kill errant slow tests after SLOWTIMEOUT seconds"
536 533 " (default: $%s or %d)" % defaults['slowtimeout'],
537 534 )
538 535 harness.add_argument(
539 536 "-t",
540 537 "--timeout",
541 538 type=int,
542 539 help="kill errant tests after TIMEOUT seconds"
543 540 " (default: $%s or %d)" % defaults['timeout'],
544 541 )
545 542 harness.add_argument(
546 543 "--tmpdir",
547 544 help="run tests in the given temporary directory"
548 545 " (implies --keep-tmpdir)",
549 546 )
550 547 harness.add_argument(
551 548 "-v", "--verbose", action="store_true", help="output verbose messages"
552 549 )
553 550
554 551 hgconf = parser.add_argument_group('Mercurial Configuration')
555 552 hgconf.add_argument(
556 553 "--chg",
557 554 action="store_true",
558 555 help="install and use chg wrapper in place of hg",
559 556 )
560 557 hgconf.add_argument(
561 558 "--chg-debug",
562 559 action="store_true",
563 560 help="show chg debug logs",
564 561 )
565 562 hgconf.add_argument(
566 563 "--rhg",
567 564 action="store_true",
568 565 help="install and use rhg Rust implementation in place of hg",
569 566 )
570 567 hgconf.add_argument(
571 568 "--pyoxidized",
572 569 action="store_true",
573 570 help="build the hg binary using pyoxidizer",
574 571 )
575 572 hgconf.add_argument("--compiler", help="compiler to build with")
576 573 hgconf.add_argument(
577 574 '--extra-config-opt',
578 575 action="append",
579 576 default=[],
580 577 help='set the given config opt in the test hgrc',
581 578 )
582 579 hgconf.add_argument(
583 580 "-l",
584 581 "--local",
585 582 action="store_true",
586 583 help="shortcut for --with-hg=<testdir>/../hg, "
587 584 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
588 585 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
589 586 )
590 587 hgconf.add_argument(
591 588 "--ipv6",
592 589 action="store_true",
593 590 help="prefer IPv6 to IPv4 for network related tests",
594 591 )
595 592 hgconf.add_argument(
596 593 "--pure",
597 594 action="store_true",
598 595 help="use pure Python code instead of C extensions",
599 596 )
600 597 hgconf.add_argument(
601 598 "--rust",
602 599 action="store_true",
603 600 help="use Rust code alongside C extensions",
604 601 )
605 602 hgconf.add_argument(
606 603 "--no-rust",
607 604 action="store_true",
608 605 help="do not use Rust code even if compiled",
609 606 )
610 607 hgconf.add_argument(
611 608 "--with-chg",
612 609 metavar="CHG",
613 610 help="use specified chg wrapper in place of hg",
614 611 )
615 612 hgconf.add_argument(
616 613 "--with-rhg",
617 614 metavar="RHG",
618 615 help="use specified rhg Rust implementation in place of hg",
619 616 )
620 617 hgconf.add_argument(
621 618 "--with-hg",
622 619 metavar="HG",
623 620 help="test using specified hg script rather than a "
624 621 "temporary installation",
625 622 )
626 623
627 624 reporting = parser.add_argument_group('Results Reporting')
628 625 reporting.add_argument(
629 626 "-C",
630 627 "--annotate",
631 628 action="store_true",
632 629 help="output files annotated with coverage",
633 630 )
634 631 reporting.add_argument(
635 632 "--color",
636 633 choices=["always", "auto", "never"],
637 634 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
638 635 help="colorisation: always|auto|never (default: auto)",
639 636 )
640 637 reporting.add_argument(
641 638 "-c",
642 639 "--cover",
643 640 action="store_true",
644 641 help="print a test coverage report",
645 642 )
646 643 reporting.add_argument(
647 644 '--exceptions',
648 645 action='store_true',
649 646 help='log all exceptions and generate an exception report',
650 647 )
651 648 reporting.add_argument(
652 649 "-H",
653 650 "--htmlcov",
654 651 action="store_true",
655 652 help="create an HTML report of the coverage of the files",
656 653 )
657 654 reporting.add_argument(
658 655 "--json",
659 656 action="store_true",
660 657 help="store test result data in 'report.json' file",
661 658 )
662 659 reporting.add_argument(
663 660 "--outputdir",
664 661 help="directory to write error logs to (default=test directory)",
665 662 )
666 663 reporting.add_argument(
667 664 "-n", "--nodiff", action="store_true", help="skip showing test changes"
668 665 )
669 666 reporting.add_argument(
670 667 "-S",
671 668 "--noskips",
672 669 action="store_true",
673 670 help="don't report skip tests verbosely",
674 671 )
675 672 reporting.add_argument(
676 673 "--time", action="store_true", help="time how long each test takes"
677 674 )
678 675 reporting.add_argument("--view", help="external diff viewer")
679 676 reporting.add_argument(
680 677 "--xunit", help="record xunit results at specified path"
681 678 )
682 679
683 680 for option, (envvar, default) in defaults.items():
684 681 defaults[option] = type(default)(os.environ.get(envvar, default))
685 682 parser.set_defaults(**defaults)
686 683
687 684 return parser
688 685
689 686
690 687 def parseargs(args, parser):
691 688 """Parse arguments with our OptionParser and validate results."""
692 689 options = parser.parse_args(args)
693 690
694 691 # jython is always pure
695 692 if 'java' in sys.platform or '__pypy__' in sys.modules:
696 693 options.pure = True
697 694
698 695 if platform.python_implementation() != 'CPython' and options.rust:
699 696 parser.error('Rust extensions are only available with CPython')
700 697
701 698 if options.pure and options.rust:
702 699 parser.error('--rust cannot be used with --pure')
703 700
704 701 if options.rust and options.no_rust:
705 702 parser.error('--rust cannot be used with --no-rust')
706 703
707 704 if options.local:
708 705 if options.with_hg or options.with_rhg or options.with_chg:
709 706 parser.error(
710 707 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
711 708 )
712 709 if options.pyoxidized:
713 710 parser.error('--pyoxidized does not work with --local (yet)')
714 711 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
715 712 reporootdir = os.path.dirname(testdir)
716 713 pathandattrs = [(b'hg', 'with_hg')]
717 714 if options.chg:
718 715 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
719 716 if options.rhg:
720 717 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
721 718 for relpath, attr in pathandattrs:
722 719 binpath = os.path.join(reporootdir, relpath)
723 720 if not (WINDOWS or os.access(binpath, os.X_OK)):
724 721 parser.error(
725 722 '--local specified, but %r not found or '
726 723 'not executable' % binpath
727 724 )
728 725 setattr(options, attr, _bytes2sys(binpath))
729 726
730 727 if options.with_hg:
731 728 options.with_hg = canonpath(_sys2bytes(options.with_hg))
732 729 if not (
733 730 os.path.isfile(options.with_hg)
734 731 and os.access(options.with_hg, os.X_OK)
735 732 ):
736 733 parser.error('--with-hg must specify an executable hg script')
737 734 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
738 735 msg = 'warning: --with-hg should specify an hg script, not: %s\n'
739 736 msg %= _bytes2sys(os.path.basename(options.with_hg))
740 737 sys.stderr.write(msg)
741 738 sys.stderr.flush()
742 739
743 740 if (options.chg or options.with_chg) and WINDOWS:
744 741 parser.error('chg does not work on %s' % os.name)
745 742 if (options.rhg or options.with_rhg) and WINDOWS:
746 743 parser.error('rhg does not work on %s' % os.name)
747 744 if options.pyoxidized and not WINDOWS:
748 745 parser.error('--pyoxidized is currently Windows only')
749 746 if options.with_chg:
750 747 options.chg = False # no installation to temporary location
751 748 options.with_chg = canonpath(_sys2bytes(options.with_chg))
752 749 if not (
753 750 os.path.isfile(options.with_chg)
754 751 and os.access(options.with_chg, os.X_OK)
755 752 ):
756 753 parser.error('--with-chg must specify a chg executable')
757 754 if options.with_rhg:
758 755 options.rhg = False # no installation to temporary location
759 756 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
760 757 if not (
761 758 os.path.isfile(options.with_rhg)
762 759 and os.access(options.with_rhg, os.X_OK)
763 760 ):
764 761 parser.error('--with-rhg must specify a rhg executable')
765 762 if options.chg and options.with_hg:
766 763 # chg shares installation location with hg
767 764 parser.error(
768 765 '--chg does not work when --with-hg is specified '
769 766 '(use --with-chg instead)'
770 767 )
771 768 if options.rhg and options.with_hg:
772 769 # rhg shares installation location with hg
773 770 parser.error(
774 771 '--rhg does not work when --with-hg is specified '
775 772 '(use --with-rhg instead)'
776 773 )
777 774 if options.rhg and options.chg:
778 775 parser.error('--rhg and --chg do not work together')
779 776
780 777 if options.color == 'always' and not pygmentspresent:
781 778 sys.stderr.write(
782 779 'warning: --color=always ignored because '
783 780 'pygments is not installed\n'
784 781 )
785 782
786 783 if options.bisect_repo and not options.known_good_rev:
787 784 parser.error("--bisect-repo cannot be used without --known-good-rev")
788 785
789 786 global useipv6
790 787 if options.ipv6:
791 788 useipv6 = checksocketfamily('AF_INET6')
792 789 else:
793 790 # only use IPv6 if IPv4 is unavailable and IPv6 is available
794 791 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
795 792 'AF_INET6'
796 793 )
797 794
798 795 options.anycoverage = options.cover or options.annotate or options.htmlcov
799 796 if options.anycoverage:
800 797 try:
801 798 import coverage
802 799
803 800 covver = version.StrictVersion(coverage.__version__).version
804 801 if covver < (3, 3):
805 802 parser.error('coverage options require coverage 3.3 or later')
806 803 except ImportError:
807 804 parser.error('coverage options now require the coverage package')
808 805
809 806 if options.anycoverage and options.local:
810 807 # this needs some path mangling somewhere, I guess
811 808 parser.error(
812 809 "sorry, coverage options do not work when --local " "is specified"
813 810 )
814 811
815 812 if options.anycoverage and options.with_hg:
816 813 parser.error(
817 814 "sorry, coverage options do not work when --with-hg " "is specified"
818 815 )
819 816
820 817 global verbose
821 818 if options.verbose:
822 819 verbose = ''
823 820
824 821 if options.tmpdir:
825 822 options.tmpdir = canonpath(options.tmpdir)
826 823
827 824 if options.jobs < 1:
828 825 parser.error('--jobs must be positive')
829 826 if options.interactive and options.debug:
830 827 parser.error("-i/--interactive and -d/--debug are incompatible")
831 828 if options.debug:
832 829 if options.timeout != defaults['timeout']:
833 830 sys.stderr.write('warning: --timeout option ignored with --debug\n')
834 831 if options.slowtimeout != defaults['slowtimeout']:
835 832 sys.stderr.write(
836 833 'warning: --slowtimeout option ignored with --debug\n'
837 834 )
838 835 options.timeout = 0
839 836 options.slowtimeout = 0
840 837
841 838 if options.blacklist:
842 839 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
843 840 if options.whitelist:
844 841 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
845 842 else:
846 843 options.whitelisted = {}
847 844
848 845 if options.showchannels:
849 846 options.nodiff = True
850 847
851 848 return options
852 849
853 850
854 851 def rename(src, dst):
855 852 """Like os.rename(), trade atomicity and opened files friendliness
856 853 for existing destination support.
857 854 """
858 855 shutil.copy(src, dst)
859 856 os.remove(src)
860 857
861 858
862 859 def makecleanable(path):
863 860 """Try to fix directory permission recursively so that the entire tree
864 861 can be deleted"""
865 862 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
866 863 for d in dirnames:
867 864 p = os.path.join(dirpath, d)
868 865 try:
869 866 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
870 867 except OSError:
871 868 pass
872 869
873 870
874 871 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
875 872
876 873
877 874 def getdiff(expected, output, ref, err):
878 875 servefail = False
879 876 lines = []
880 877 for line in _unified_diff(expected, output, ref, err):
881 878 if line.startswith(b'+++') or line.startswith(b'---'):
882 879 line = line.replace(b'\\', b'/')
883 880 if line.endswith(b' \n'):
884 881 line = line[:-2] + b'\n'
885 882 lines.append(line)
886 883 if not servefail and line.startswith(
887 884 b'+ abort: child process failed to start'
888 885 ):
889 886 servefail = True
890 887
891 888 return servefail, lines
892 889
893 890
894 891 verbose = False
895 892
896 893
897 894 def vlog(*msg):
898 895 """Log only when in verbose mode."""
899 896 if verbose is False:
900 897 return
901 898
902 899 return log(*msg)
903 900
904 901
905 902 # Bytes that break XML even in a CDATA block: control characters 0-31
906 903 # sans \t, \n and \r
907 904 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
908 905
909 906 # Match feature conditionalized output lines in the form, capturing the feature
910 907 # list in group 2, and the preceding line output in group 1:
911 908 #
912 909 # output..output (feature !)\n
913 910 optline = re.compile(br'(.*) \((.+?) !\)\n$')
914 911
915 912
916 913 def cdatasafe(data):
917 914 """Make a string safe to include in a CDATA block.
918 915
919 916 Certain control characters are illegal in a CDATA block, and
920 917 there's no way to include a ]]> in a CDATA either. This function
921 918 replaces illegal bytes with ? and adds a space between the ]] so
922 919 that it won't break the CDATA block.
923 920 """
924 921 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
925 922
926 923
927 924 def log(*msg):
928 925 """Log something to stdout.
929 926
930 927 Arguments are strings to print.
931 928 """
932 929 with iolock:
933 930 if verbose:
934 931 print(verbose, end=' ')
935 932 for m in msg:
936 933 print(m, end=' ')
937 934 print()
938 935 sys.stdout.flush()
939 936
940 937
941 938 def highlightdiff(line, color):
942 939 if not color:
943 940 return line
944 941 assert pygmentspresent
945 942 return pygments.highlight(
946 943 line.decode('latin1'), difflexer, terminal256formatter
947 944 ).encode('latin1')
948 945
949 946
950 947 def highlightmsg(msg, color):
951 948 if not color:
952 949 return msg
953 950 assert pygmentspresent
954 951 return pygments.highlight(msg, runnerlexer, runnerformatter)
955 952
956 953
957 954 def terminate(proc):
958 955 """Terminate subprocess"""
959 956 vlog('# Terminating process %d' % proc.pid)
960 957 try:
961 958 proc.terminate()
962 959 except OSError:
963 960 pass
964 961
965 962
966 963 def killdaemons(pidfile):
967 964 import killdaemons as killmod
968 965
969 966 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
970 967
971 968
972 969 # sysconfig is not thread-safe (https://github.com/python/cpython/issues/92452)
973 970 sysconfiglock = threading.Lock()
974 971
975 972
976 973 class Test(unittest.TestCase):
977 974 """Encapsulates a single, runnable test.
978 975
979 976 While this class conforms to the unittest.TestCase API, it differs in that
980 977 instances need to be instantiated manually. (Typically, unittest.TestCase
981 978 classes are instantiated automatically by scanning modules.)
982 979 """
983 980
984 981 # Status code reserved for skipped tests (used by hghave).
985 982 SKIPPED_STATUS = 80
986 983
987 984 def __init__(
988 985 self,
989 986 path,
990 987 outputdir,
991 988 tmpdir,
992 989 keeptmpdir=False,
993 990 debug=False,
994 991 first=False,
995 992 timeout=None,
996 993 startport=None,
997 994 extraconfigopts=None,
998 995 shell=None,
999 996 hgcommand=None,
1000 997 slowtimeout=None,
1001 998 usechg=False,
1002 999 chgdebug=False,
1003 1000 useipv6=False,
1004 1001 ):
1005 1002 """Create a test from parameters.
1006 1003
1007 1004 path is the full path to the file defining the test.
1008 1005
1009 1006 tmpdir is the main temporary directory to use for this test.
1010 1007
1011 1008 keeptmpdir determines whether to keep the test's temporary directory
1012 1009 after execution. It defaults to removal (False).
1013 1010
1014 1011 debug mode will make the test execute verbosely, with unfiltered
1015 1012 output.
1016 1013
1017 1014 timeout controls the maximum run time of the test. It is ignored when
1018 1015 debug is True. See slowtimeout for tests with #require slow.
1019 1016
1020 1017 slowtimeout overrides timeout if the test has #require slow.
1021 1018
1022 1019 startport controls the starting port number to use for this test. Each
1023 1020 test will reserve 3 port numbers for execution. It is the caller's
1024 1021 responsibility to allocate a non-overlapping port range to Test
1025 1022 instances.
1026 1023
1027 1024 extraconfigopts is an iterable of extra hgrc config options. Values
1028 1025 must have the form "key=value" (something understood by hgrc). Values
1029 1026 of the form "foo.key=value" will result in "[foo] key=value".
1030 1027
1031 1028 shell is the shell to execute tests in.
1032 1029 """
1033 1030 if timeout is None:
1034 1031 timeout = defaults['timeout']
1035 1032 if startport is None:
1036 1033 startport = defaults['port']
1037 1034 if slowtimeout is None:
1038 1035 slowtimeout = defaults['slowtimeout']
1039 1036 self.path = path
1040 1037 self.relpath = os.path.relpath(path)
1041 1038 self.bname = os.path.basename(path)
1042 1039 self.name = _bytes2sys(self.bname)
1043 1040 self._testdir = os.path.dirname(path)
1044 1041 self._outputdir = outputdir
1045 1042 self._tmpname = os.path.basename(path)
1046 1043 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1047 1044
1048 1045 self._threadtmp = tmpdir
1049 1046 self._keeptmpdir = keeptmpdir
1050 1047 self._debug = debug
1051 1048 self._first = first
1052 1049 self._timeout = timeout
1053 1050 self._slowtimeout = slowtimeout
1054 1051 self._startport = startport
1055 1052 self._extraconfigopts = extraconfigopts or []
1056 1053 self._shell = _sys2bytes(shell)
1057 1054 self._hgcommand = hgcommand or b'hg'
1058 1055 self._usechg = usechg
1059 1056 self._chgdebug = chgdebug
1060 1057 self._useipv6 = useipv6
1061 1058
1062 1059 self._aborted = False
1063 1060 self._daemonpids = []
1064 1061 self._finished = None
1065 1062 self._ret = None
1066 1063 self._out = None
1067 1064 self._skipped = None
1068 1065 self._testtmp = None
1069 1066 self._chgsockdir = None
1070 1067
1071 1068 self._refout = self.readrefout()
1072 1069
1073 1070 def readrefout(self):
1074 1071 """read reference output"""
1075 1072 # If we're not in --debug mode and reference output file exists,
1076 1073 # check test output against it.
1077 1074 if self._debug:
1078 1075 return None # to match "out is None"
1079 1076 elif os.path.exists(self.refpath):
1080 1077 with open(self.refpath, 'rb') as f:
1081 1078 return f.read().splitlines(True)
1082 1079 else:
1083 1080 return []
1084 1081
1085 1082 # needed to get base class __repr__ running
1086 1083 @property
1087 1084 def _testMethodName(self):
1088 1085 return self.name
1089 1086
1090 1087 def __str__(self):
1091 1088 return self.name
1092 1089
1093 1090 def shortDescription(self):
1094 1091 return self.name
1095 1092
1096 1093 def setUp(self):
1097 1094 """Tasks to perform before run()."""
1098 1095 self._finished = False
1099 1096 self._ret = None
1100 1097 self._out = None
1101 1098 self._skipped = None
1102 1099
1103 1100 try:
1104 1101 os.mkdir(self._threadtmp)
1105 1102 except OSError as e:
1106 1103 if e.errno != errno.EEXIST:
1107 1104 raise
1108 1105
1109 1106 name = self._tmpname
1110 1107 self._testtmp = os.path.join(self._threadtmp, name)
1111 1108 os.mkdir(self._testtmp)
1112 1109
1113 1110 # Remove any previous output files.
1114 1111 if os.path.exists(self.errpath):
1115 1112 try:
1116 1113 os.remove(self.errpath)
1117 1114 except OSError as e:
1118 1115 # We might have raced another test to clean up a .err
1119 1116 # file, so ignore ENOENT when removing a previous .err
1120 1117 # file.
1121 1118 if e.errno != errno.ENOENT:
1122 1119 raise
1123 1120
1124 1121 if self._usechg:
1125 1122 self._chgsockdir = os.path.join(
1126 1123 self._threadtmp, b'%s.chgsock' % name
1127 1124 )
1128 1125 os.mkdir(self._chgsockdir)
1129 1126
1130 1127 def run(self, result):
1131 1128 """Run this test and report results against a TestResult instance."""
1132 1129 # This function is extremely similar to unittest.TestCase.run(). Once
1133 1130 # we require Python 2.7 (or at least its version of unittest), this
1134 1131 # function can largely go away.
1135 1132 self._result = result
1136 1133 result.startTest(self)
1137 1134 try:
1138 1135 try:
1139 1136 self.setUp()
1140 1137 except (KeyboardInterrupt, SystemExit):
1141 1138 self._aborted = True
1142 1139 raise
1143 1140 except Exception:
1144 1141 result.addError(self, sys.exc_info())
1145 1142 return
1146 1143
1147 1144 success = False
1148 1145 try:
1149 1146 self.runTest()
1150 1147 except KeyboardInterrupt:
1151 1148 self._aborted = True
1152 1149 raise
1153 1150 except unittest.SkipTest as e:
1154 1151 result.addSkip(self, str(e))
1155 1152 # The base class will have already counted this as a
1156 1153 # test we "ran", but we want to exclude skipped tests
1157 1154 # from those we count towards those run.
1158 1155 result.testsRun -= 1
1159 1156 except self.failureException as e:
1160 1157 # This differs from unittest in that we don't capture
1161 1158 # the stack trace. This is for historical reasons and
1162 1159 # this decision could be revisited in the future,
1163 1160 # especially for PythonTest instances.
1164 1161 if result.addFailure(self, str(e)):
1165 1162 success = True
1166 1163 except Exception:
1167 1164 result.addError(self, sys.exc_info())
1168 1165 else:
1169 1166 success = True
1170 1167
1171 1168 try:
1172 1169 self.tearDown()
1173 1170 except (KeyboardInterrupt, SystemExit):
1174 1171 self._aborted = True
1175 1172 raise
1176 1173 except Exception:
1177 1174 result.addError(self, sys.exc_info())
1178 1175 success = False
1179 1176
1180 1177 if success:
1181 1178 result.addSuccess(self)
1182 1179 finally:
1183 1180 result.stopTest(self, interrupted=self._aborted)
1184 1181
1185 1182 def runTest(self):
1186 1183 """Run this test instance.
1187 1184
1188 1185 This will return a tuple describing the result of the test.
1189 1186 """
1190 1187 env = self._getenv()
1191 1188 self._genrestoreenv(env)
1192 1189 self._daemonpids.append(env['DAEMON_PIDS'])
1193 1190 self._createhgrc(env['HGRCPATH'])
1194 1191
1195 1192 vlog('# Test', self.name)
1196 1193
1197 1194 ret, out = self._run(env)
1198 1195 self._finished = True
1199 1196 self._ret = ret
1200 1197 self._out = out
1201 1198
1202 1199 def describe(ret):
1203 1200 if ret < 0:
1204 1201 return 'killed by signal: %d' % -ret
1205 1202 return 'returned error code %d' % ret
1206 1203
1207 1204 self._skipped = False
1208 1205
1209 1206 if ret == self.SKIPPED_STATUS:
1210 1207 if out is None: # Debug mode, nothing to parse.
1211 1208 missing = ['unknown']
1212 1209 failed = None
1213 1210 else:
1214 1211 missing, failed = TTest.parsehghaveoutput(out)
1215 1212
1216 1213 if not missing:
1217 1214 missing = ['skipped']
1218 1215
1219 1216 if failed:
1220 1217 self.fail('hg have failed checking for %s' % failed[-1])
1221 1218 else:
1222 1219 self._skipped = True
1223 1220 raise unittest.SkipTest(missing[-1])
1224 1221 elif ret == 'timeout':
1225 1222 self.fail('timed out')
1226 1223 elif ret is False:
1227 1224 self.fail('no result code from test')
1228 1225 elif out != self._refout:
1229 1226 # Diff generation may rely on written .err file.
1230 1227 if (
1231 1228 (ret != 0 or out != self._refout)
1232 1229 and not self._skipped
1233 1230 and not self._debug
1234 1231 ):
1235 1232 with open(self.errpath, 'wb') as f:
1236 1233 for line in out:
1237 1234 f.write(line)
1238 1235
1239 1236 # The result object handles diff calculation for us.
1240 1237 with firstlock:
1241 1238 if self._result.addOutputMismatch(self, ret, out, self._refout):
1242 1239 # change was accepted, skip failing
1243 1240 return
1244 1241 if self._first:
1245 1242 global firsterror
1246 1243 firsterror = True
1247 1244
1248 1245 if ret:
1249 1246 msg = 'output changed and ' + describe(ret)
1250 1247 else:
1251 1248 msg = 'output changed'
1252 1249
1253 1250 self.fail(msg)
1254 1251 elif ret:
1255 1252 self.fail(describe(ret))
1256 1253
1257 1254 def tearDown(self):
1258 1255 """Tasks to perform after run()."""
1259 1256 for entry in self._daemonpids:
1260 1257 killdaemons(entry)
1261 1258 self._daemonpids = []
1262 1259
1263 1260 if self._keeptmpdir:
1264 1261 log(
1265 1262 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1266 1263 % (
1267 1264 _bytes2sys(self._testtmp),
1268 1265 _bytes2sys(self._threadtmp),
1269 1266 )
1270 1267 )
1271 1268 else:
1272 1269 try:
1273 1270 shutil.rmtree(self._testtmp)
1274 1271 except OSError:
1275 1272 # unreadable directory may be left in $TESTTMP; fix permission
1276 1273 # and try again
1277 1274 makecleanable(self._testtmp)
1278 1275 shutil.rmtree(self._testtmp, True)
1279 1276 shutil.rmtree(self._threadtmp, True)
1280 1277
1281 1278 if self._usechg:
1282 1279 # chgservers will stop automatically after they find the socket
1283 1280 # files are deleted
1284 1281 shutil.rmtree(self._chgsockdir, True)
1285 1282
1286 1283 if (
1287 1284 (self._ret != 0 or self._out != self._refout)
1288 1285 and not self._skipped
1289 1286 and not self._debug
1290 1287 and self._out
1291 1288 ):
1292 1289 with open(self.errpath, 'wb') as f:
1293 1290 for line in self._out:
1294 1291 f.write(line)
1295 1292
1296 1293 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1297 1294
1298 1295 def _run(self, env):
1299 1296 # This should be implemented in child classes to run tests.
1300 1297 raise unittest.SkipTest('unknown test type')
1301 1298
1302 1299 def abort(self):
1303 1300 """Terminate execution of this test."""
1304 1301 self._aborted = True
1305 1302
1306 1303 def _portmap(self, i):
1307 1304 offset = b'' if i == 0 else b'%d' % i
1308 1305 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1309 1306
1310 1307 def _getreplacements(self):
1311 1308 """Obtain a mapping of text replacements to apply to test output.
1312 1309
1313 1310 Test output needs to be normalized so it can be compared to expected
1314 1311 output. This function defines how some of that normalization will
1315 1312 occur.
1316 1313 """
1317 1314 r = [
1318 1315 # This list should be parallel to defineport in _getenv
1319 1316 self._portmap(0),
1320 1317 self._portmap(1),
1321 1318 self._portmap(2),
1322 1319 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1323 1320 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1324 1321 ]
1325 1322 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1326 1323 if WINDOWS:
1327 1324 # JSON output escapes backslashes in Windows paths, so also catch a
1328 1325 # double-escape.
1329 1326 replaced = self._testtmp.replace(b'\\', br'\\')
1330 1327 r.append((self._escapepath(replaced), b'$STR_REPR_TESTTMP'))
1331 1328
1332 1329 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1333 1330
1334 1331 if os.path.exists(replacementfile):
1335 1332 data = {}
1336 1333 with open(replacementfile, mode='rb') as source:
1337 1334 # the intermediate 'compile' step help with debugging
1338 1335 code = compile(source.read(), replacementfile, 'exec')
1339 1336 exec(code, data)
1340 1337 for value in data.get('substitutions', ()):
1341 1338 if len(value) != 2:
1342 1339 msg = 'malformatted substitution in %s: %r'
1343 1340 msg %= (replacementfile, value)
1344 1341 raise ValueError(msg)
1345 1342 r.append(value)
1346 1343 return r
1347 1344
1348 1345 def _escapepath(self, p):
1349 1346 if WINDOWS:
1350 1347 return b''.join(
1351 1348 c.isalpha()
1352 1349 and b'[%s%s]' % (c.lower(), c.upper())
1353 1350 or c in b'/\\'
1354 1351 and br'[/\\]'
1355 1352 or c.isdigit()
1356 1353 and c
1357 1354 or b'\\' + c
1358 1355 for c in [p[i : i + 1] for i in range(len(p))]
1359 1356 )
1360 1357 else:
1361 1358 return re.escape(p)
1362 1359
1363 1360 def _localip(self):
1364 1361 if self._useipv6:
1365 1362 return b'::1'
1366 1363 else:
1367 1364 return b'127.0.0.1'
1368 1365
1369 1366 def _genrestoreenv(self, testenv):
1370 1367 """Generate a script that can be used by tests to restore the original
1371 1368 environment."""
1372 1369 # Put the restoreenv script inside self._threadtmp
1373 1370 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1374 1371 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1375 1372
1376 1373 # Only restore environment variable names that the shell allows
1377 1374 # us to export.
1378 1375 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1379 1376
1380 1377 # Do not restore these variables; otherwise tests would fail.
1381 1378 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1382 1379
1383 1380 with open(scriptpath, 'w') as envf:
1384 1381 for name, value in origenviron.items():
1385 1382 if not name_regex.match(name):
1386 1383 # Skip environment variables with unusual names not
1387 1384 # allowed by most shells.
1388 1385 continue
1389 1386 if name in reqnames:
1390 1387 continue
1391 1388 envf.write('%s=%s\n' % (name, shellquote(value)))
1392 1389
1393 1390 for name in testenv:
1394 1391 if name in origenviron or name in reqnames:
1395 1392 continue
1396 1393 envf.write('unset %s\n' % (name,))
1397 1394
1398 1395 def _getenv(self):
1399 1396 """Obtain environment variables to use during test execution."""
1400 1397
1401 1398 def defineport(i):
1402 1399 offset = '' if i == 0 else '%s' % i
1403 1400 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1404 1401
1405 1402 env = os.environ.copy()
1406 1403 with sysconfiglock:
1407 1404 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1408 1405 env['HGEMITWARNINGS'] = '1'
1409 1406 env['TESTTMP'] = _bytes2sys(self._testtmp)
1410 1407 # the FORWARD_SLASH version is useful when running `sh` on non unix
1411 1408 # system (e.g. Windows)
1412 1409 env['TESTTMP_FORWARD_SLASH'] = env['TESTTMP'].replace(os.sep, '/')
1413 1410 uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
1414 1411 env['HGTEST_UUIDFILE'] = uid_file
1415 1412 env['TESTNAME'] = self.name
1416 1413 env['HOME'] = _bytes2sys(self._testtmp)
1417 1414 if WINDOWS:
1418 1415 env['REALUSERPROFILE'] = env['USERPROFILE']
1419 1416 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1420 1417 env['USERPROFILE'] = env['HOME']
1421 1418 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1422 1419 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1423 1420 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1424 1421 # This number should match portneeded in _getport
1425 for port in xrange(3):
1422 for port in range(3):
1426 1423 # This list should be parallel to _portmap in _getreplacements
1427 1424 defineport(port)
1428 1425 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1429 1426 env["DAEMON_PIDS"] = _bytes2sys(
1430 1427 os.path.join(self._threadtmp, b'daemon.pids')
1431 1428 )
1432 1429 env["HGEDITOR"] = (
1433 1430 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1434 1431 )
1435 1432 env["HGUSER"] = "test"
1436 1433 env["HGENCODING"] = "ascii"
1437 1434 env["HGENCODINGMODE"] = "strict"
1438 1435 env["HGHOSTNAME"] = "test-hostname"
1439 1436 env['HGIPV6'] = str(int(self._useipv6))
1440 1437 # See contrib/catapipe.py for how to use this functionality.
1441 1438 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1442 1439 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1443 1440 # non-test one in as a default, otherwise set to devnull
1444 1441 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1445 1442 'HGCATAPULTSERVERPIPE', os.devnull
1446 1443 )
1447 1444
1448 1445 extraextensions = []
1449 1446 for opt in self._extraconfigopts:
1450 1447 section, key = opt.split('.', 1)
1451 1448 if section != 'extensions':
1452 1449 continue
1453 1450 name = key.split('=', 1)[0]
1454 1451 extraextensions.append(name)
1455 1452
1456 1453 if extraextensions:
1457 1454 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1458 1455
1459 1456 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1460 1457 # IP addresses.
1461 1458 env['LOCALIP'] = _bytes2sys(self._localip())
1462 1459
1463 1460 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1464 1461 # but this is needed for testing python instances like dummyssh,
1465 1462 # dummysmtpd.py, and dumbhttp.py.
1466 1463 if WINDOWS:
1467 1464 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1468 1465
1469 1466 # A modified HOME in the test environment can confuse Rust tools, so set
1470 1467 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1471 1468 # present and these variables aren't already defined.
1472 1469 cargo_home_path = os.path.expanduser('~/.cargo')
1473 1470 rustup_home_path = os.path.expanduser('~/.rustup')
1474 1471
1475 1472 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1476 1473 env['CARGO_HOME'] = cargo_home_path
1477 1474 if (
1478 1475 os.path.exists(rustup_home_path)
1479 1476 and b'RUSTUP_HOME' not in osenvironb
1480 1477 ):
1481 1478 env['RUSTUP_HOME'] = rustup_home_path
1482 1479
1483 1480 # Reset some environment variables to well-known values so that
1484 1481 # the tests produce repeatable output.
1485 1482 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1486 1483 env['TZ'] = 'GMT'
1487 1484 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1488 1485 env['COLUMNS'] = '80'
1489 1486 env['TERM'] = 'xterm'
1490 1487
1491 1488 dropped = [
1492 1489 'CDPATH',
1493 1490 'CHGDEBUG',
1494 1491 'EDITOR',
1495 1492 'GREP_OPTIONS',
1496 1493 'HG',
1497 1494 'HGMERGE',
1498 1495 'HGPLAIN',
1499 1496 'HGPLAINEXCEPT',
1500 1497 'HGPROF',
1501 1498 'http_proxy',
1502 1499 'no_proxy',
1503 1500 'NO_PROXY',
1504 1501 'PAGER',
1505 1502 'VISUAL',
1506 1503 ]
1507 1504
1508 1505 for k in dropped:
1509 1506 if k in env:
1510 1507 del env[k]
1511 1508
1512 1509 # unset env related to hooks
1513 1510 for k in list(env):
1514 1511 if k.startswith('HG_'):
1515 1512 del env[k]
1516 1513
1517 1514 if self._usechg:
1518 1515 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1519 1516 if self._chgdebug:
1520 1517 env['CHGDEBUG'] = 'true'
1521 1518
1522 1519 return env
1523 1520
1524 1521 def _createhgrc(self, path):
1525 1522 """Create an hgrc file for this test."""
1526 1523 with open(path, 'wb') as hgrc:
1527 1524 hgrc.write(b'[ui]\n')
1528 1525 hgrc.write(b'slash = True\n')
1529 1526 hgrc.write(b'interactive = False\n')
1530 1527 hgrc.write(b'detailed-exit-code = True\n')
1531 1528 hgrc.write(b'merge = internal:merge\n')
1532 1529 hgrc.write(b'mergemarkers = detailed\n')
1533 1530 hgrc.write(b'promptecho = True\n')
1534 1531 dummyssh = os.path.join(self._testdir, b'dummyssh')
1535 1532 hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
1536 1533 hgrc.write(b'timeout.warn=15\n')
1537 1534 hgrc.write(b'[chgserver]\n')
1538 1535 hgrc.write(b'idletimeout=60\n')
1539 1536 hgrc.write(b'[defaults]\n')
1540 1537 hgrc.write(b'[devel]\n')
1541 1538 hgrc.write(b'all-warnings = true\n')
1542 1539 hgrc.write(b'default-date = 0 0\n')
1543 1540 hgrc.write(b'[largefiles]\n')
1544 1541 hgrc.write(
1545 1542 b'usercache = %s\n'
1546 1543 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1547 1544 )
1548 1545 hgrc.write(b'[lfs]\n')
1549 1546 hgrc.write(
1550 1547 b'usercache = %s\n'
1551 1548 % (os.path.join(self._testtmp, b'.cache/lfs'))
1552 1549 )
1553 1550 hgrc.write(b'[web]\n')
1554 1551 hgrc.write(b'address = localhost\n')
1555 1552 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1556 1553 hgrc.write(b'server-header = testing stub value\n')
1557 1554
1558 1555 for opt in self._extraconfigopts:
1559 1556 section, key = _sys2bytes(opt).split(b'.', 1)
1560 1557 assert b'=' in key, (
1561 1558 'extra config opt %s must ' 'have an = for assignment' % opt
1562 1559 )
1563 1560 hgrc.write(b'[%s]\n%s\n' % (section, key))
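# So a (hypothetical) extra config opt of "phases.publish=False" is split
# on the first '.' and written to the generated hgrc as:
#
#   [phases]
#   publish=False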
1564 1561
1565 1562 def fail(self, msg):
1566 1563 # unittest differentiates between errored and failed.
1567 1564 # Failed is denoted by AssertionError (by default at least).
1568 1565 raise AssertionError(msg)
1569 1566
1570 1567 def _runcommand(self, cmd, env, normalizenewlines=False):
1571 1568 """Run command in a sub-process, capturing the output (stdout and
1572 1569 stderr).
1573 1570
1574 1571 Return a tuple (exitcode, output). output is None in debug mode.
1575 1572 """
1576 1573 if self._debug:
1577 1574 proc = subprocess.Popen(
1578 1575 _bytes2sys(cmd),
1579 1576 shell=True,
1580 1577 close_fds=closefds,
1581 1578 cwd=_bytes2sys(self._testtmp),
1582 1579 env=env,
1583 1580 )
1584 1581 ret = proc.wait()
1585 1582 return (ret, None)
1586 1583
1587 1584 proc = Popen4(cmd, self._testtmp, self._timeout, env)
1588 1585
1589 1586 def cleanup():
1590 1587 terminate(proc)
1591 1588 ret = proc.wait()
1592 1589 if ret == 0:
1593 1590 ret = signal.SIGTERM << 8
1594 1591 killdaemons(env['DAEMON_PIDS'])
1595 1592 return ret
1596 1593
1597 1594 proc.tochild.close()
1598 1595
1599 1596 try:
1600 1597 output = proc.fromchild.read()
1601 1598 except KeyboardInterrupt:
1602 1599 vlog('# Handling keyboard interrupt')
1603 1600 cleanup()
1604 1601 raise
1605 1602
1606 1603 ret = proc.wait()
1607 1604 if wifexited(ret):
1608 1605 ret = os.WEXITSTATUS(ret)
1609 1606
1610 1607 if proc.timeout:
1611 1608 ret = 'timeout'
1612 1609
1613 1610 if ret:
1614 1611 killdaemons(env['DAEMON_PIDS'])
1615 1612
1616 1613 for s, r in self._getreplacements():
1617 1614 output = re.sub(s, r, output)
1618 1615
1619 1616 if normalizenewlines:
1620 1617 output = output.replace(b'\r\n', b'\n')
1621 1618
1622 1619 return ret, output.splitlines(True)
1623 1620
1624 1621
1625 1622 class PythonTest(Test):
1626 1623 """A Python-based test."""
1627 1624
1628 1625 @property
1629 1626 def refpath(self):
1630 1627 return os.path.join(self._testdir, b'%s.out' % self.bname)
1631 1628
1632 1629 def _run(self, env):
1633 1630 # Quote the python(3) executable for Windows
1634 1631 cmd = b'"%s" "%s"' % (PYTHON, self.path)
1635 1632 vlog("# Running", cmd.decode("utf-8"))
1636 1633 result = self._runcommand(cmd, env, normalizenewlines=WINDOWS)
1637 1634 if self._aborted:
1638 1635 raise KeyboardInterrupt()
1639 1636
1640 1637 return result
1641 1638
1642 1639
1643 1640 # Some glob patterns apply only in some circumstances, so the script
1644 1641 # might want to remove (glob) annotations that otherwise should be
1645 1642 # retained.
1646 1643 checkcodeglobpats = [
1647 1644 # On Windows it looks like \ doesn't require a (glob), but we know
1648 1645 # better.
1649 1646 re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
1650 1647 re.compile(br'^moving \S+/.*[^)]$'),
1651 1648 re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
1652 1649 # Not all platforms have 127.0.0.1 as loopback (though most do),
1653 1650 # so we always glob that too.
1654 1651 re.compile(br'.*\$LOCALIP.*$'),
1655 1652 ]
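# For example, a (hypothetical) expected line b'pushing to $TESTTMP/repo-a'
# matches the first pattern above, so globmatch() leaves its (glob)
# annotation alone instead of returning the '-glob' hint when the actual
# output happens to match it literally.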
1656 1653
1657 1654 bchr = lambda x: bytes([x])
1658 1655
1659 1656 WARN_UNDEFINED = 1
1660 1657 WARN_YES = 2
1661 1658 WARN_NO = 3
1662 1659
1663 1660 MARK_OPTIONAL = b" (?)\n"
1664 1661
1665 1662
1666 1663 def isoptional(line):
1667 1664 return line.endswith(MARK_OPTIONAL)
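# An expected line ending in " (?)" (as written in the .t file, e.g. a
# hypothetical "  warning: might appear (?)") is optional: if it doesn't
# match, linematch() returns "retry", the optional line is kept in the
# merged output and the same actual output line is tried against the next
# expected line instead of being reported as a mismatch.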
1668 1665
1669 1666
1670 1667 class TTest(Test):
1671 1668 """A "t test" is a test backed by a .t file."""
1672 1669
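# Sketch of the .t syntax this class consumes (hypothetical content):
#
#   $ hg status
#   ? untracked-file
#   >>> print('inline python')
#   inline python
#
# _parsetest() below turns the "  $ ", "  > " and "  >>> " lines into a
# generated shell script and treats the remaining indented lines as the
# expected output to compare against.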
1673 1670 SKIPPED_PREFIX = b'skipped: '
1674 1671 FAILED_PREFIX = b'hghave check failed: '
1675 1672 NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search
1676 1673
1677 1674 ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
1678 1675 ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
1679 1676 ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1680 1677
1681 1678 def __init__(self, path, *args, **kwds):
1682 1679 # accept an extra "case" parameter
1683 1680 case = kwds.pop('case', [])
1684 1681 self._case = case
1685 1682 self._allcases = {x for y in parsettestcases(path) for x in y}
1686 1683 super(TTest, self).__init__(path, *args, **kwds)
1687 1684 if case:
1688 1685 casepath = b'#'.join(case)
1689 1686 self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
1690 1687 self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
1691 1688 self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
1692 1689 self._have = {}
1693 1690
1694 1691 @property
1695 1692 def refpath(self):
1696 1693 return os.path.join(self._testdir, self.bname)
1697 1694
1698 1695 def _run(self, env):
1699 1696 with open(self.path, 'rb') as f:
1700 1697 lines = f.readlines()
1701 1698
1702 1699 # The .t file is both the reference output and the test input; keep the
1703 1700 # reference output updated with the test input. This avoids some race
1704 1701 # conditions where the reference output does not match the actual test.
1705 1702 if self._refout is not None:
1706 1703 self._refout = lines
1707 1704
1708 1705 salt, script, after, expected = self._parsetest(lines)
1709 1706
1710 1707 # Write out the generated script.
1711 1708 fname = b'%s.sh' % self._testtmp
1712 1709 with open(fname, 'wb') as f:
1713 1710 for l in script:
1714 1711 f.write(l)
1715 1712
1716 1713 cmd = b'%s "%s"' % (self._shell, fname)
1717 1714 vlog("# Running", cmd.decode("utf-8"))
1718 1715
1719 1716 exitcode, output = self._runcommand(cmd, env)
1720 1717
1721 1718 if self._aborted:
1722 1719 raise KeyboardInterrupt()
1723 1720
1724 1721 # Do not merge output if skipped. Return hghave message instead.
1725 1722 # Similarly, with --debug, output is None.
1726 1723 if exitcode == self.SKIPPED_STATUS or output is None:
1727 1724 return exitcode, output
1728 1725
1729 1726 return self._processoutput(exitcode, output, salt, after, expected)
1730 1727
1731 1728 def _hghave(self, reqs):
1732 1729 allreqs = b' '.join(reqs)
1733 1730
1734 1731 self._detectslow(reqs)
1735 1732
1736 1733 if allreqs in self._have:
1737 1734 return self._have.get(allreqs)
1738 1735
1739 1736 # TODO do something smarter when all other uses of hghave are gone.
1740 1737 runtestdir = osenvironb[b'RUNTESTDIR']
1741 1738 tdir = runtestdir.replace(b'\\', b'/')
1742 1739 proc = Popen4(
1743 1740 b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
1744 1741 self._testtmp,
1745 1742 0,
1746 1743 self._getenv(),
1747 1744 )
1748 1745 stdout, stderr = proc.communicate()
1749 1746 ret = proc.wait()
1750 1747 if wifexited(ret):
1751 1748 ret = os.WEXITSTATUS(ret)
1752 1749 if ret == 2:
1753 1750 print(stdout.decode('utf-8'))
1754 1751 sys.exit(1)
1755 1752
1756 1753 if ret != 0:
1757 1754 self._have[allreqs] = (False, stdout)
1758 1755 return False, stdout
1759 1756
1760 1757 self._have[allreqs] = (True, None)
1761 1758 return True, None
1762 1759
1763 1760 def _detectslow(self, reqs):
1764 1761 """update the timeout of slow test when appropriate"""
1765 1762 if b'slow' in reqs:
1766 1763 self._timeout = self._slowtimeout
1767 1764
1768 1765 def _iftest(self, args):
1769 1766 # implements "#if"
1770 1767 reqs = []
1771 1768 for arg in args:
1772 1769 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1773 1770 if arg[3:] in self._case:
1774 1771 return False
1775 1772 elif arg in self._allcases:
1776 1773 if arg not in self._case:
1777 1774 return False
1778 1775 else:
1779 1776 reqs.append(arg)
1780 1777 self._detectslow(reqs)
1781 1778 return self._hghave(reqs)[0]
1782 1779
1783 1780 def _parsetest(self, lines):
1784 1781 # We generate a shell script which outputs unique markers to line
1785 1782 # up script results with our source. These markers include input
1786 1783 # line number and the last return code.
1787 1784 salt = b"SALT%d" % time.time()
1788 1785
1789 1786 def addsalt(line, inpython):
1790 1787 if inpython:
1791 1788 script.append(b'%s %d 0\n' % (salt, line))
1792 1789 else:
1793 1790 script.append(b'echo %s %d $?\n' % (salt, line))
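# For a shell command at (hypothetical) source line 12 this emits
# "echo SALT<timestamp> 12 $?", so the script output later carries both
# the originating line number and the previous command's exit status
# alongside the salt marker.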
1794 1791
1795 1792 activetrace = []
1796 1793 session = str(uuid.uuid4()).encode('ascii')
1797 1794 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1798 1795 'HGCATAPULTSERVERPIPE'
1799 1796 )
1800 1797
1801 1798 def toggletrace(cmd=None):
1802 1799 if not hgcatapult or hgcatapult == os.devnull:
1803 1800 return
1804 1801
1805 1802 if activetrace:
1806 1803 script.append(
1807 1804 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1808 1805 % (session, activetrace[0])
1809 1806 )
1810 1807 if cmd is None:
1811 1808 return
1812 1809
1813 1810 if isinstance(cmd, str):
1814 1811 quoted = shellquote(cmd.strip())
1815 1812 else:
1816 1813 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1817 1814 quoted = quoted.replace(b'\\', b'\\\\')
1818 1815 script.append(
1819 1816 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1820 1817 % (session, quoted)
1821 1818 )
1822 1819 activetrace[0:] = [quoted]
1823 1820
1824 1821 script = []
1825 1822
1826 1823 # After we run the shell script, we re-unify the script output
1827 1824 # with non-active parts of the source, with synchronization by our
1828 1825 # SALT line number markers. The after table contains the non-active
1829 1826 # components, ordered by line number.
1830 1827 after = {}
1831 1828
1832 1829 # Expected shell script output.
1833 1830 expected = {}
1834 1831
1835 1832 pos = prepos = -1
1836 1833
1837 1834 # True or False when in a true or false conditional section
1838 1835 skipping = None
1839 1836
1840 1837 # We keep track of whether or not we're in a Python block so we
1841 1838 # can generate the surrounding doctest magic.
1842 1839 inpython = False
1843 1840
1844 1841 if self._debug:
1845 1842 script.append(b'set -x\n')
1846 1843 if os.getenv('MSYSTEM'):
1847 1844 script.append(b'alias pwd="pwd -W"\n')
1848 1845
1849 1846 if hgcatapult and hgcatapult != os.devnull:
1850 1847 hgcatapult = hgcatapult.encode('utf8')
1851 1848 cataname = self.name.encode('utf8')
1852 1849
1853 1850 # Kludge: use a while loop to keep the pipe from getting
1854 1851 # closed by our echo commands. The still-running file gets
1855 1852 # reaped at the end of the script, which causes the while
1856 1853 # loop to exit and closes the pipe. Sigh.
1857 1854 script.append(
1858 1855 b'rtendtracing() {\n'
1859 1856 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1860 1857 b' rm -f "$TESTTMP/.still-running"\n'
1861 1858 b'}\n'
1862 1859 b'trap "rtendtracing" 0\n'
1863 1860 b'touch "$TESTTMP/.still-running"\n'
1864 1861 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1865 1862 b'> %(catapult)s &\n'
1866 1863 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1867 1864 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1868 1865 % {
1869 1866 b'name': cataname,
1870 1867 b'session': session,
1871 1868 b'catapult': hgcatapult,
1872 1869 }
1873 1870 )
1874 1871
1875 1872 if self._case:
1876 1873 casestr = b'#'.join(self._case)
1877 1874 if isinstance(casestr, str):
1878 1875 quoted = shellquote(casestr)
1879 1876 else:
1880 1877 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1881 1878 script.append(b'TESTCASE=%s\n' % quoted)
1882 1879 script.append(b'export TESTCASE\n')
1883 1880
1884 1881 n = 0
1885 1882 for n, l in enumerate(lines):
1886 1883 if not l.endswith(b'\n'):
1887 1884 l += b'\n'
1888 1885 if l.startswith(b'#require'):
1889 1886 lsplit = l.split()
1890 1887 if len(lsplit) < 2 or lsplit[0] != b'#require':
1891 1888 after.setdefault(pos, []).append(
1892 1889 b' !!! invalid #require\n'
1893 1890 )
1894 1891 if not skipping:
1895 1892 haveresult, message = self._hghave(lsplit[1:])
1896 1893 if not haveresult:
1897 1894 script = [b'echo "%s"\nexit 80\n' % message]
1898 1895 break
1899 1896 after.setdefault(pos, []).append(l)
1900 1897 elif l.startswith(b'#if'):
1901 1898 lsplit = l.split()
1902 1899 if len(lsplit) < 2 or lsplit[0] != b'#if':
1903 1900 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1904 1901 if skipping is not None:
1905 1902 after.setdefault(pos, []).append(b' !!! nested #if\n')
1906 1903 skipping = not self._iftest(lsplit[1:])
1907 1904 after.setdefault(pos, []).append(l)
1908 1905 elif l.startswith(b'#else'):
1909 1906 if skipping is None:
1910 1907 after.setdefault(pos, []).append(b' !!! missing #if\n')
1911 1908 skipping = not skipping
1912 1909 after.setdefault(pos, []).append(l)
1913 1910 elif l.startswith(b'#endif'):
1914 1911 if skipping is None:
1915 1912 after.setdefault(pos, []).append(b' !!! missing #if\n')
1916 1913 skipping = None
1917 1914 after.setdefault(pos, []).append(l)
1918 1915 elif skipping:
1919 1916 after.setdefault(pos, []).append(l)
1920 1917 elif l.startswith(b' >>> '): # python inlines
1921 1918 after.setdefault(pos, []).append(l)
1922 1919 prepos = pos
1923 1920 pos = n
1924 1921 if not inpython:
1925 1922 # We've just entered a Python block. Add the header.
1926 1923 inpython = True
1927 1924 addsalt(prepos, False) # Make sure we report the exit code.
1928 1925 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1929 1926 addsalt(n, True)
1930 1927 script.append(l[2:])
1931 1928 elif l.startswith(b' ... '): # python inlines
1932 1929 after.setdefault(prepos, []).append(l)
1933 1930 script.append(l[2:])
1934 1931 elif l.startswith(b' $ '): # commands
1935 1932 if inpython:
1936 1933 script.append(b'EOF\n')
1937 1934 inpython = False
1938 1935 after.setdefault(pos, []).append(l)
1939 1936 prepos = pos
1940 1937 pos = n
1941 1938 addsalt(n, False)
1942 1939 rawcmd = l[4:]
1943 1940 cmd = rawcmd.split()
1944 1941 toggletrace(rawcmd)
1945 1942 if len(cmd) == 2 and cmd[0] == b'cd':
1946 1943 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1947 1944 script.append(rawcmd)
1948 1945 elif l.startswith(b' > '): # continuations
1949 1946 after.setdefault(prepos, []).append(l)
1950 1947 script.append(l[4:])
1951 1948 elif l.startswith(b' '): # results
1952 1949 # Queue up a list of expected results.
1953 1950 expected.setdefault(pos, []).append(l[2:])
1954 1951 else:
1955 1952 if inpython:
1956 1953 script.append(b'EOF\n')
1957 1954 inpython = False
1958 1955 # Non-command/result. Queue up for merged output.
1959 1956 after.setdefault(pos, []).append(l)
1960 1957
1961 1958 if inpython:
1962 1959 script.append(b'EOF\n')
1963 1960 if skipping is not None:
1964 1961 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1965 1962 addsalt(n + 1, False)
1966 1963 # Need to end any current per-command trace
1967 1964 if activetrace:
1968 1965 toggletrace()
1969 1966 return salt, script, after, expected
1970 1967
1971 1968 def _processoutput(self, exitcode, output, salt, after, expected):
1972 1969 # Merge the script output back into a unified test.
1973 1970 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
1974 1971 if exitcode != 0:
1975 1972 warnonly = WARN_NO
1976 1973
1977 1974 pos = -1
1978 1975 postout = []
1979 1976 for out_rawline in output:
1980 1977 out_line, cmd_line = out_rawline, None
1981 1978 if salt in out_rawline:
1982 1979 out_line, cmd_line = out_rawline.split(salt, 1)
1983 1980
1984 1981 pos, postout, warnonly = self._process_out_line(
1985 1982 out_line, pos, postout, expected, warnonly
1986 1983 )
1987 1984 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
1988 1985
1989 1986 if pos in after:
1990 1987 postout += after.pop(pos)
1991 1988
1992 1989 if warnonly == WARN_YES:
1993 1990 exitcode = False # Set exitcode to warned.
1994 1991
1995 1992 return exitcode, postout
1996 1993
1997 1994 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
1998 1995 while out_line:
1999 1996 if not out_line.endswith(b'\n'):
2000 1997 out_line += b' (no-eol)\n'
2001 1998
2002 1999 # Find the expected output at the current position.
2003 2000 els = [None]
2004 2001 if expected.get(pos, None):
2005 2002 els = expected[pos]
2006 2003
2007 2004 optional = []
2008 2005 for i, el in enumerate(els):
2009 2006 r = False
2010 2007 if el:
2011 2008 r, exact = self.linematch(el, out_line)
2012 2009 if isinstance(r, str):
2013 2010 if r == '-glob':
2014 2011 out_line = ''.join(el.rsplit(' (glob)', 1))
2015 2012 r = '' # Warn only this line.
2016 2013 elif r == "retry":
2017 2014 postout.append(b' ' + el)
2018 2015 else:
2019 2016 log('\ninfo, unknown linematch result: %r\n' % r)
2020 2017 r = False
2021 2018 if r:
2022 2019 els.pop(i)
2023 2020 break
2024 2021 if el:
2025 2022 if isoptional(el):
2026 2023 optional.append(i)
2027 2024 else:
2028 2025 m = optline.match(el)
2029 2026 if m:
2030 2027 conditions = [c for c in m.group(2).split(b' ')]
2031 2028
2032 2029 if not self._iftest(conditions):
2033 2030 optional.append(i)
2034 2031 if exact:
2035 2032 # Don't allow the line to be matched against a later
2036 2033 # line in the output
2037 2034 els.pop(i)
2038 2035 break
2039 2036
2040 2037 if r:
2041 2038 if r == "retry":
2042 2039 continue
2043 2040 # clean up any optional leftovers
2044 2041 for i in optional:
2045 2042 postout.append(b' ' + els[i])
2046 2043 for i in reversed(optional):
2047 2044 del els[i]
2048 2045 postout.append(b' ' + el)
2049 2046 else:
2050 2047 if self.NEEDESCAPE(out_line):
2051 2048 out_line = TTest._stringescape(
2052 2049 b'%s (esc)\n' % out_line.rstrip(b'\n')
2053 2050 )
2054 2051 postout.append(b' ' + out_line) # Let diff deal with it.
2055 2052 if r != '': # If line failed.
2056 2053 warnonly = WARN_NO
2057 2054 elif warnonly == WARN_UNDEFINED:
2058 2055 warnonly = WARN_YES
2059 2056 break
2060 2057 else:
2061 2058 # clean up any optional leftovers
2062 2059 while expected.get(pos, None):
2063 2060 el = expected[pos].pop(0)
2064 2061 if el:
2065 2062 if not isoptional(el):
2066 2063 m = optline.match(el)
2067 2064 if m:
2068 2065 conditions = [c for c in m.group(2).split(b' ')]
2069 2066
2070 2067 if self._iftest(conditions):
2071 2068 # Don't append as optional line
2072 2069 continue
2073 2070 else:
2074 2071 continue
2075 2072 postout.append(b' ' + el)
2076 2073 return pos, postout, warnonly
2077 2074
2078 2075 def _process_cmd_line(self, cmd_line, pos, postout, after):
2079 2076 """process a "command" part of a line from unified test output"""
2080 2077 if cmd_line:
2081 2078 # Add on last return code.
2082 2079 ret = int(cmd_line.split()[1])
2083 2080 if ret != 0:
2084 2081 postout.append(b' [%d]\n' % ret)
2085 2082 if pos in after:
2086 2083 # Merge in non-active test bits.
2087 2084 postout += after.pop(pos)
2088 2085 pos = int(cmd_line.split()[0])
2089 2086 return pos, postout
2090 2087
2091 2088 @staticmethod
2092 2089 def rematch(el, l):
2093 2090 try:
2094 2091 # parse any flags at the beginning of the regex. Only 'i' is
2095 2092 # supported right now, but this should be easy to extend.
2096 2093 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2097 2094 flags = flags or b''
2098 2095 el = flags + b'(?:' + el + b')'
2099 2096 # use \Z to ensure that the regex matches to the end of the string
2100 2097 if WINDOWS:
2101 2098 return re.match(el + br'\r?\n\Z', l)
2102 2099 return re.match(el + br'\n\Z', l)
2103 2100 except re.error:
2104 2101 # el is an invalid regex
2105 2102 return False
2106 2103
2107 2104 @staticmethod
2108 2105 def globmatch(el, l):
2109 2106 # The only supported special characters are * and ?, plus /, which also
2110 2107 # matches \ on Windows. Escaping of these characters is supported.
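# Illustration (hypothetical pattern): b'moved */file? to dir' is
# translated below so that '*' becomes '.*', '?' becomes '.', and (when
# os.altsep is set) '/' matches either path separator; the resulting
# regex is then handed to rematch().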
2111 2108 if el + b'\n' == l:
2112 2109 if os.altsep:
2113 2110 # matching on "/" is not needed for this line
2114 2111 for pat in checkcodeglobpats:
2115 2112 if pat.match(el):
2116 2113 return True
2117 2114 return b'-glob'
2118 2115 return True
2119 2116 el = el.replace(b'$LOCALIP', b'*')
2120 2117 i, n = 0, len(el)
2121 2118 res = b''
2122 2119 while i < n:
2123 2120 c = el[i : i + 1]
2124 2121 i += 1
2125 2122 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2126 2123 res += el[i - 1 : i + 1]
2127 2124 i += 1
2128 2125 elif c == b'*':
2129 2126 res += b'.*'
2130 2127 elif c == b'?':
2131 2128 res += b'.'
2132 2129 elif c == b'/' and os.altsep:
2133 2130 res += b'[/\\\\]'
2134 2131 else:
2135 2132 res += re.escape(c)
2136 2133 return TTest.rematch(res, l)
2137 2134
2138 2135 def linematch(self, el, l):
2139 2136 if el == l: # perfect match (fast)
2140 2137 return True, True
2141 2138 retry = False
2142 2139 if isoptional(el):
2143 2140 retry = "retry"
2144 2141 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2145 2142 else:
2146 2143 m = optline.match(el)
2147 2144 if m:
2148 2145 conditions = [c for c in m.group(2).split(b' ')]
2149 2146
2150 2147 el = m.group(1) + b"\n"
2151 2148 if not self._iftest(conditions):
2152 2149 # listed feature missing, should not match
2153 2150 return "retry", False
2154 2151
2155 2152 if el.endswith(b" (esc)\n"):
2156 2153 el = el[:-7].decode('unicode_escape') + '\n'
2157 2154 el = el.encode('latin-1')
2158 2155 if el == l or WINDOWS and el[:-1] + b'\r\n' == l:
2159 2156 return True, True
2160 2157 if el.endswith(b" (re)\n"):
2161 2158 return (TTest.rematch(el[:-6], l) or retry), False
2162 2159 if el.endswith(b" (glob)\n"):
2163 2160 # ignore '(glob)' added to l by 'replacements'
2164 2161 if l.endswith(b" (glob)\n"):
2165 2162 l = l[:-8] + b"\n"
2166 2163 return (TTest.globmatch(el[:-8], l) or retry), False
2167 2164 if os.altsep:
2168 2165 _l = l.replace(b'\\', b'/')
2169 2166 if el == _l or WINDOWS and el[:-1] + b'\r\n' == _l:
2170 2167 return True, True
2171 2168 return retry, True
2172 2169
2173 2170 @staticmethod
2174 2171 def parsehghaveoutput(lines):
2175 2172 """Parse hghave log lines.
2176 2173
2177 2174 Return tuple of lists (missing, failed):
2178 2175 * the missing/unknown features
2179 2176 * the features for which existence check failed"""
2180 2177 missing = []
2181 2178 failed = []
2182 2179 for line in lines:
2183 2180 if line.startswith(TTest.SKIPPED_PREFIX):
2184 2181 line = line.splitlines()[0]
2185 2182 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2186 2183 elif line.startswith(TTest.FAILED_PREFIX):
2187 2184 line = line.splitlines()[0]
2188 2185 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2189 2186
2190 2187 return missing, failed
2191 2188
2192 2189 @staticmethod
2193 2190 def _escapef(m):
2194 2191 return TTest.ESCAPEMAP[m.group(0)]
2195 2192
2196 2193 @staticmethod
2197 2194 def _stringescape(s):
2198 2195 return TTest.ESCAPESUB(TTest._escapef, s)
2199 2196
2200 2197
2201 2198 iolock = threading.RLock()
2202 2199 firstlock = threading.RLock()
2203 2200 firsterror = False
2204 2201
2205 2202 base_class = unittest.TextTestResult
2206 2203
2207 2204
2208 2205 class TestResult(base_class):
2209 2206 """Holds results when executing via unittest."""
2210 2207
2211 2208 def __init__(self, options, *args, **kwargs):
2212 2209 super(TestResult, self).__init__(*args, **kwargs)
2213 2210
2214 2211 self._options = options
2215 2212
2216 2213 # unittest.TestResult didn't have skipped until 2.7. We need to
2217 2214 # polyfill it.
2218 2215 self.skipped = []
2219 2216
2220 2217 # We have a custom "ignored" result that isn't present in any Python
2221 2218 # unittest implementation. It is very similar to skipped. It may make
2222 2219 # sense to map it into skip some day.
2223 2220 self.ignored = []
2224 2221
2225 2222 self.times = []
2226 2223 self._firststarttime = None
2227 2224 # Data stored for the benefit of generating xunit reports.
2228 2225 self.successes = []
2229 2226 self.faildata = {}
2230 2227
2231 2228 if options.color == 'auto':
2232 2229 isatty = self.stream.isatty()
2233 2230 # For some reason, redirecting stdout on Windows disables the ANSI
2234 2231 # color processing of stderr, which is what is used to print the
2235 2232 # output. Therefore, both must be tty on Windows to enable color.
2236 2233 if WINDOWS:
2237 2234 isatty = isatty and sys.stdout.isatty()
2238 2235 self.color = pygmentspresent and isatty
2239 2236 elif options.color == 'never':
2240 2237 self.color = False
2241 2238 else: # 'always', for testing purposes
2242 2239 self.color = pygmentspresent
2243 2240
2244 2241 def onStart(self, test):
2245 2242 """Can be overriden by custom TestResult"""
2246 2243
2247 2244 def onEnd(self):
2248 2245 """Can be overriden by custom TestResult"""
2249 2246
2250 2247 def addFailure(self, test, reason):
2251 2248 self.failures.append((test, reason))
2252 2249
2253 2250 if self._options.first:
2254 2251 self.stop()
2255 2252 else:
2256 2253 with iolock:
2257 2254 if reason == "timed out":
2258 2255 self.stream.write('t')
2259 2256 else:
2260 2257 if not self._options.nodiff:
2261 2258 self.stream.write('\n')
2262 2259 # Exclude the '\n' from highlighting to lex correctly
2263 2260 formatted = 'ERROR: %s output changed\n' % test
2264 2261 self.stream.write(highlightmsg(formatted, self.color))
2265 2262 self.stream.write('!')
2266 2263
2267 2264 self.stream.flush()
2268 2265
2269 2266 def addSuccess(self, test):
2270 2267 with iolock:
2271 2268 super(TestResult, self).addSuccess(test)
2272 2269 self.successes.append(test)
2273 2270
2274 2271 def addError(self, test, err):
2275 2272 super(TestResult, self).addError(test, err)
2276 2273 if self._options.first:
2277 2274 self.stop()
2278 2275
2279 2276 # Polyfill.
2280 2277 def addSkip(self, test, reason):
2281 2278 self.skipped.append((test, reason))
2282 2279 with iolock:
2283 2280 if self.showAll:
2284 2281 self.stream.writeln('skipped %s' % reason)
2285 2282 else:
2286 2283 self.stream.write('s')
2287 2284 self.stream.flush()
2288 2285
2289 2286 def addIgnore(self, test, reason):
2290 2287 self.ignored.append((test, reason))
2291 2288 with iolock:
2292 2289 if self.showAll:
2293 2290 self.stream.writeln('ignored %s' % reason)
2294 2291 else:
2295 2292 if reason not in ('not retesting', "doesn't match keyword"):
2296 2293 self.stream.write('i')
2297 2294 else:
2298 2295 self.testsRun += 1
2299 2296 self.stream.flush()
2300 2297
2301 2298 def addOutputMismatch(self, test, ret, got, expected):
2302 2299 """Record a mismatch in test output for a particular test."""
2303 2300 if self.shouldStop or firsterror:
2304 2301 # don't print, some other test case already failed and
2305 2302 # printed, we're just stale and probably failed due to our
2306 2303 # temp dir getting cleaned up.
2307 2304 return
2308 2305
2309 2306 accepted = False
2310 2307 lines = []
2311 2308
2312 2309 with iolock:
2313 2310 if self._options.nodiff:
2314 2311 pass
2315 2312 elif self._options.view:
2316 2313 v = self._options.view
2317 2314 subprocess.call(
2318 2315 r'"%s" "%s" "%s"'
2319 2316 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2320 2317 shell=True,
2321 2318 )
2322 2319 else:
2323 2320 servefail, lines = getdiff(
2324 2321 expected, got, test.refpath, test.errpath
2325 2322 )
2326 2323 self.stream.write('\n')
2327 2324 for line in lines:
2328 2325 line = highlightdiff(line, self.color)
2329 2326 self.stream.flush()
2330 2327 self.stream.buffer.write(line)
2331 2328 self.stream.buffer.flush()
2332 2329
2333 2330 if servefail:
2334 2331 raise test.failureException(
2335 2332 'server failed to start (HGPORT=%s)' % test._startport
2336 2333 )
2337 2334
2338 2335 # handle interactive prompt without releasing iolock
2339 2336 if self._options.interactive:
2340 2337 if test.readrefout() != expected:
2341 2338 self.stream.write(
2342 2339 'Reference output has changed (run again to prompt '
2343 2340 'changes)'
2344 2341 )
2345 2342 else:
2346 2343 self.stream.write('Accept this change? [y/N] ')
2347 2344 self.stream.flush()
2348 2345 answer = sys.stdin.readline().strip()
2349 2346 if answer.lower() in ('y', 'yes'):
2350 2347 if test.path.endswith(b'.t'):
2351 2348 rename(test.errpath, test.path)
2352 2349 else:
2353 2350 rename(test.errpath, b'%s.out' % test.path)
2354 2351 accepted = True
2355 2352 if not accepted:
2356 2353 self.faildata[test.name] = b''.join(lines)
2357 2354
2358 2355 return accepted
2359 2356
2360 2357 def startTest(self, test):
2361 2358 super(TestResult, self).startTest(test)
2362 2359
2363 2360 # os.times() reports the user and system time spent by a process and
2364 2361 # its child processes, along with the real elapsed time of the process.
2365 2362 # It has one limitation: it only works on Linux and not on Windows,
2366 2363 # which is why we fall back to another function
2367 2364 # for wall-time calculations.
2368 2365 test.started_times = os.times()
2369 2366 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2370 2367 test.started_time = time.time()
2371 2368 if self._firststarttime is None: # thread racy but irrelevant
2372 2369 self._firststarttime = test.started_time
2373 2370
2374 2371 def stopTest(self, test, interrupted=False):
2375 2372 super(TestResult, self).stopTest(test)
2376 2373
2377 2374 test.stopped_times = os.times()
2378 2375 stopped_time = time.time()
2379 2376
2380 2377 starttime = test.started_times
2381 2378 endtime = test.stopped_times
2382 2379 origin = self._firststarttime
2383 2380 self.times.append(
2384 2381 (
2385 2382 test.name,
2386 2383 endtime[2] - starttime[2], # user space CPU time
2387 2384 endtime[3] - starttime[3], # sys space CPU time
2388 2385 stopped_time - test.started_time, # real time
2389 2386 test.started_time - origin, # start date in run context
2390 2387 stopped_time - origin, # end date in run context
2391 2388 )
2392 2389 )
2393 2390
2394 2391 if interrupted:
2395 2392 with iolock:
2396 2393 self.stream.writeln(
2397 2394 'INTERRUPTED: %s (after %d seconds)'
2398 2395 % (test.name, self.times[-1][3])
2399 2396 )
2400 2397
2401 2398
2402 2399 def getTestResult():
2403 2400 """
2404 2401 Returns the relevant test result
2405 2402 """
2406 2403 if "CUSTOM_TEST_RESULT" in os.environ:
2407 2404 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2408 2405 return testresultmodule.TestResult
2409 2406 else:
2410 2407 return TestResult
2411 2408
2412 2409
2413 2410 class TestSuite(unittest.TestSuite):
2414 2411 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2415 2412
2416 2413 def __init__(
2417 2414 self,
2418 2415 testdir,
2419 2416 jobs=1,
2420 2417 whitelist=None,
2421 2418 blacklist=None,
2422 2419 keywords=None,
2423 2420 loop=False,
2424 2421 runs_per_test=1,
2425 2422 loadtest=None,
2426 2423 showchannels=False,
2427 2424 *args,
2428 2425 **kwargs
2429 2426 ):
2430 2427 """Create a new instance that can run tests with a configuration.
2431 2428
2432 2429 testdir specifies the directory where tests are executed from. This
2433 2430 is typically the ``tests`` directory from Mercurial's source
2434 2431 repository.
2435 2432
2436 2433 jobs specifies the number of jobs to run concurrently. Each test
2437 2434 executes on its own thread. Tests actually spawn new processes, so
2438 2435 state mutation should not be an issue.
2439 2436
2440 2437 If there is only one job, it will use the main thread.
2441 2438
2442 2439 whitelist and blacklist denote tests that have been whitelisted and
2443 2440 blacklisted, respectively. These arguments don't belong in TestSuite.
2444 2441 Instead, whitelist and blacklist should be handled by the thing that
2445 2442 populates the TestSuite with tests. They are present to preserve
2446 2443 backwards compatible behavior which reports skipped tests as part
2447 2444 of the results.
2448 2445
2449 2446 keywords denotes key words that will be used to filter which tests
2450 2447 to execute. This arguably belongs outside of TestSuite.
2451 2448
2452 2449 loop denotes whether to loop over tests forever.
2453 2450 """
2454 2451 super(TestSuite, self).__init__(*args, **kwargs)
2455 2452
2456 2453 self._jobs = jobs
2457 2454 self._whitelist = whitelist
2458 2455 self._blacklist = blacklist
2459 2456 self._keywords = keywords
2460 2457 self._loop = loop
2461 2458 self._runs_per_test = runs_per_test
2462 2459 self._loadtest = loadtest
2463 2460 self._showchannels = showchannels
2464 2461
2465 2462 def run(self, result):
2466 2463 # We have a number of filters that need to be applied. We do this
2467 2464 # here instead of inside Test because it makes the running logic for
2468 2465 # Test simpler.
2469 2466 tests = []
2470 2467 num_tests = [0]
2471 2468 for test in self._tests:
2472 2469
2473 2470 def get():
2474 2471 num_tests[0] += 1
2475 2472 if getattr(test, 'should_reload', False):
2476 2473 return self._loadtest(test, num_tests[0])
2477 2474 return test
2478 2475
2479 2476 if not os.path.exists(test.path):
2480 2477 result.addSkip(test, "Doesn't exist")
2481 2478 continue
2482 2479
2483 2480 is_whitelisted = self._whitelist and (
2484 2481 test.relpath in self._whitelist or test.bname in self._whitelist
2485 2482 )
2486 2483 if not is_whitelisted:
2487 2484 is_blacklisted = self._blacklist and (
2488 2485 test.relpath in self._blacklist
2489 2486 or test.bname in self._blacklist
2490 2487 )
2491 2488 if is_blacklisted:
2492 2489 result.addSkip(test, 'blacklisted')
2493 2490 continue
2494 2491 if self._keywords:
2495 2492 with open(test.path, 'rb') as f:
2496 2493 t = f.read().lower() + test.bname.lower()
2497 2494 ignored = False
2498 2495 for k in self._keywords.lower().split():
2499 2496 if k not in t:
2500 2497 result.addIgnore(test, "doesn't match keyword")
2501 2498 ignored = True
2502 2499 break
2503 2500
2504 2501 if ignored:
2505 2502 continue
2506 for _ in xrange(self._runs_per_test):
2503 for _ in range(self._runs_per_test):
2507 2504 tests.append(get())
2508 2505
2509 2506 runtests = list(tests)
2510 2507 done = queue.Queue()
2511 2508 running = 0
2512 2509
2513 2510 channels_lock = threading.Lock()
2514 2511 channels = [""] * self._jobs
2515 2512
2516 2513 def job(test, result):
2517 2514 with channels_lock:
2518 2515 for n, v in enumerate(channels):
2519 2516 if not v:
2520 2517 channel = n
2521 2518 break
2522 2519 else:
2523 2520 raise ValueError('Could not find output channel')
2524 2521 channels[channel] = "=" + test.name[5:].split(".")[0]
2525 2522
2526 2523 r = None
2527 2524 try:
2528 2525 test(result)
2529 2526 except KeyboardInterrupt:
2530 2527 pass
2531 2528 except: # re-raises
2532 2529 r = ('!', test, 'run-test raised an error, see traceback')
2533 2530 raise
2534 2531 finally:
2535 2532 try:
2536 2533 channels[channel] = ''
2537 2534 except IndexError:
2538 2535 pass
2539 2536 done.put(r)
2540 2537
2541 2538 def stat():
2542 2539 count = 0
2543 2540 while channels:
2544 2541 d = '\n%03s ' % count
2545 2542 for n, v in enumerate(channels):
2546 2543 if v:
2547 2544 d += v[0]
2548 2545 channels[n] = v[1:] or '.'
2549 2546 else:
2550 2547 d += ' '
2551 2548 d += ' '
2552 2549 with iolock:
2553 2550 sys.stdout.write(d + ' ')
2554 2551 sys.stdout.flush()
2555 for x in xrange(10):
2552 for x in range(10):
2556 2553 if channels:
2557 2554 time.sleep(0.1)
2558 2555 count += 1
2559 2556
2560 2557 stoppedearly = False
2561 2558
2562 2559 if self._showchannels:
2563 2560 statthread = threading.Thread(target=stat, name="stat")
2564 2561 statthread.start()
2565 2562
2566 2563 try:
2567 2564 while tests or running:
2568 2565 if not done.empty() or running == self._jobs or not tests:
2569 2566 try:
2570 2567 done.get(True, 1)
2571 2568 running -= 1
2572 2569 if result and result.shouldStop:
2573 2570 stoppedearly = True
2574 2571 break
2575 2572 except queue.Empty:
2576 2573 continue
2577 2574 if tests and not running == self._jobs:
2578 2575 test = tests.pop(0)
2579 2576 if self._loop:
2580 2577 if getattr(test, 'should_reload', False):
2581 2578 num_tests[0] += 1
2582 2579 tests.append(self._loadtest(test, num_tests[0]))
2583 2580 else:
2584 2581 tests.append(test)
2585 2582 if self._jobs == 1:
2586 2583 job(test, result)
2587 2584 else:
2588 2585 t = threading.Thread(
2589 2586 target=job, name=test.name, args=(test, result)
2590 2587 )
2591 2588 t.start()
2592 2589 running += 1
2593 2590
2594 2591 # If we stop early we still need to wait on started tests to
2595 2592 # finish. Otherwise, there is a race between the test completing
2596 2593 # and the test's cleanup code running. This could result in the
2597 2594 # test reporting incorrect results.
2598 2595 if stoppedearly:
2599 2596 while running:
2600 2597 try:
2601 2598 done.get(True, 1)
2602 2599 running -= 1
2603 2600 except queue.Empty:
2604 2601 continue
2605 2602 except KeyboardInterrupt:
2606 2603 for test in runtests:
2607 2604 test.abort()
2608 2605
2609 2606 channels = []
2610 2607
2611 2608 return result
2612 2609
2613 2610
2614 2611 # Save the most recent 5 wall-clock runtimes of each test to a
2615 2612 # human-readable text file named .testtimes. Tests are sorted
2616 2613 # alphabetically, while times for each test are listed from oldest to
2617 2614 # newest.
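# A line of .testtimes therefore looks like (hypothetical values):
#
#   test-commit.t 1.234 1.198 1.251
#
# savetimes() writes it with '%.3f' precision and loadtimes() parses it
# back with the regex '(.*?) ([0-9. ]+)'.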
2618 2615
2619 2616
2620 2617 def loadtimes(outputdir):
2621 2618 times = []
2622 2619 try:
2623 2620 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2624 2621 for line in fp:
2625 2622 m = re.match('(.*?) ([0-9. ]+)', line)
2626 2623 times.append(
2627 2624 (m.group(1), [float(t) for t in m.group(2).split()])
2628 2625 )
2629 2626 except IOError as err:
2630 2627 if err.errno != errno.ENOENT:
2631 2628 raise
2632 2629 return times
2633 2630
2634 2631
2635 2632 def savetimes(outputdir, result):
2636 2633 saved = dict(loadtimes(outputdir))
2637 2634 maxruns = 5
2638 2635 skipped = {str(t[0]) for t in result.skipped}
2639 2636 for tdata in result.times:
2640 2637 test, real = tdata[0], tdata[3]
2641 2638 if test not in skipped:
2642 2639 ts = saved.setdefault(test, [])
2643 2640 ts.append(real)
2644 2641 ts[:] = ts[-maxruns:]
2645 2642
2646 2643 fd, tmpname = tempfile.mkstemp(
2647 2644 prefix=b'.testtimes', dir=outputdir, text=True
2648 2645 )
2649 2646 with os.fdopen(fd, 'w') as fp:
2650 2647 for name, ts in sorted(saved.items()):
2651 2648 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2652 2649 timepath = os.path.join(outputdir, b'.testtimes')
2653 2650 try:
2654 2651 os.unlink(timepath)
2655 2652 except OSError:
2656 2653 pass
2657 2654 try:
2658 2655 os.rename(tmpname, timepath)
2659 2656 except OSError:
2660 2657 pass
2661 2658
2662 2659
2663 2660 class TextTestRunner(unittest.TextTestRunner):
2664 2661 """Custom unittest test runner that uses appropriate settings."""
2665 2662
2666 2663 def __init__(self, runner, *args, **kwargs):
2667 2664 super(TextTestRunner, self).__init__(*args, **kwargs)
2668 2665
2669 2666 self._runner = runner
2670 2667
2671 2668 self._result = getTestResult()(
2672 2669 self._runner.options, self.stream, self.descriptions, self.verbosity
2673 2670 )
2674 2671
2675 2672 def listtests(self, test):
2676 2673 test = sorted(test, key=lambda t: t.name)
2677 2674
2678 2675 self._result.onStart(test)
2679 2676
2680 2677 for t in test:
2681 2678 print(t.name)
2682 2679 self._result.addSuccess(t)
2683 2680
2684 2681 if self._runner.options.xunit:
2685 2682 with open(self._runner.options.xunit, "wb") as xuf:
2686 2683 self._writexunit(self._result, xuf)
2687 2684
2688 2685 if self._runner.options.json:
2689 2686 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2690 2687 with open(jsonpath, 'w') as fp:
2691 2688 self._writejson(self._result, fp)
2692 2689
2693 2690 return self._result
2694 2691
2695 2692 def run(self, test):
2696 2693 self._result.onStart(test)
2697 2694 test(self._result)
2698 2695
2699 2696 failed = len(self._result.failures)
2700 2697 skipped = len(self._result.skipped)
2701 2698 ignored = len(self._result.ignored)
2702 2699
2703 2700 with iolock:
2704 2701 self.stream.writeln('')
2705 2702
2706 2703 if not self._runner.options.noskips:
2707 2704 for test, msg in sorted(
2708 2705 self._result.skipped, key=lambda s: s[0].name
2709 2706 ):
2710 2707 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2711 2708 msg = highlightmsg(formatted, self._result.color)
2712 2709 self.stream.write(msg)
2713 2710 for test, msg in sorted(
2714 2711 self._result.failures, key=lambda f: f[0].name
2715 2712 ):
2716 2713 formatted = 'Failed %s: %s\n' % (test.name, msg)
2717 2714 self.stream.write(highlightmsg(formatted, self._result.color))
2718 2715 for test, msg in sorted(
2719 2716 self._result.errors, key=lambda e: e[0].name
2720 2717 ):
2721 2718 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2722 2719
2723 2720 if self._runner.options.xunit:
2724 2721 with open(self._runner.options.xunit, "wb") as xuf:
2725 2722 self._writexunit(self._result, xuf)
2726 2723
2727 2724 if self._runner.options.json:
2728 2725 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2729 2726 with open(jsonpath, 'w') as fp:
2730 2727 self._writejson(self._result, fp)
2731 2728
2732 2729 self._runner._checkhglib('Tested')
2733 2730
2734 2731 savetimes(self._runner._outputdir, self._result)
2735 2732
2736 2733 if failed and self._runner.options.known_good_rev:
2737 2734 self._bisecttests(t for t, m in self._result.failures)
2738 2735 self.stream.writeln(
2739 2736 '# Ran %d tests, %d skipped, %d failed.'
2740 2737 % (self._result.testsRun, skipped + ignored, failed)
2741 2738 )
2742 2739 if failed:
2743 2740 self.stream.writeln(
2744 2741 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2745 2742 )
2746 2743 if self._runner.options.time:
2747 2744 self.printtimes(self._result.times)
2748 2745
2749 2746 if self._runner.options.exceptions:
2750 2747 exceptions = aggregateexceptions(
2751 2748 os.path.join(self._runner._outputdir, b'exceptions')
2752 2749 )
2753 2750
2754 2751 self.stream.writeln('Exceptions Report:')
2755 2752 self.stream.writeln(
2756 2753 '%d total from %d frames'
2757 2754 % (exceptions['total'], len(exceptions['exceptioncounts']))
2758 2755 )
2759 2756 combined = exceptions['combined']
2760 2757 for key in sorted(combined, key=combined.get, reverse=True):
2761 2758 frame, line, exc = key
2762 2759 totalcount, testcount, leastcount, leasttest = combined[key]
2763 2760
2764 2761 self.stream.writeln(
2765 2762 '%d (%d tests)\t%s: %s (%s - %d total)'
2766 2763 % (
2767 2764 totalcount,
2768 2765 testcount,
2769 2766 frame,
2770 2767 exc,
2771 2768 leasttest,
2772 2769 leastcount,
2773 2770 )
2774 2771 )
2775 2772
2776 2773 self.stream.flush()
2777 2774
2778 2775 return self._result
2779 2776
2780 2777 def _bisecttests(self, tests):
2781 2778 bisectcmd = ['hg', 'bisect']
2782 2779 bisectrepo = self._runner.options.bisect_repo
2783 2780 if bisectrepo:
2784 2781 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2785 2782
2786 2783 def pread(args):
2787 2784 env = os.environ.copy()
2788 2785 env['HGPLAIN'] = '1'
2789 2786 p = subprocess.Popen(
2790 2787 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2791 2788 )
2792 2789 data = p.stdout.read()
2793 2790 p.wait()
2794 2791 return data
2795 2792
2796 2793 for test in tests:
2797 2794 pread(bisectcmd + ['--reset']),
2798 2795 pread(bisectcmd + ['--bad', '.'])
2799 2796 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2800 2797 # TODO: we probably need to forward more options
2801 2798 # that alter hg's behavior inside the tests.
2802 2799 opts = ''
2803 2800 withhg = self._runner.options.with_hg
2804 2801 if withhg:
2805 2802 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2806 2803 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2807 2804 data = pread(bisectcmd + ['--command', rtc])
2808 2805 m = re.search(
2809 2806 (
2810 2807 br'\nThe first (?P<goodbad>bad|good) revision '
2811 2808 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2812 2809 br'summary: +(?P<summary>[^\n]+)\n'
2813 2810 ),
2814 2811 data,
2815 2812 (re.MULTILINE | re.DOTALL),
2816 2813 )
2817 2814 if m is None:
2818 2815 self.stream.writeln(
2819 2816 'Failed to identify failure point for %s' % test
2820 2817 )
2821 2818 continue
2822 2819 dat = m.groupdict()
2823 2820 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2824 2821 self.stream.writeln(
2825 2822 '%s %s by %s (%s)'
2826 2823 % (
2827 2824 test,
2828 2825 verb,
2829 2826 dat['node'].decode('ascii'),
2830 2827 dat['summary'].decode('utf8', 'ignore'),
2831 2828 )
2832 2829 )
2833 2830
2834 2831 def printtimes(self, times):
2835 2832 # iolock held by run
2836 2833 self.stream.writeln('# Producing time report')
2837 2834 times.sort(key=lambda t: (t[3]))
2838 2835 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2839 2836 self.stream.writeln(
2840 2837 '%-7s %-7s %-7s %-7s %-7s %s'
2841 2838 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2842 2839 )
2843 2840 for tdata in times:
2844 2841 test = tdata[0]
2845 2842 cuser, csys, real, start, end = tdata[1:6]
2846 2843 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2847 2844
2848 2845 @staticmethod
2849 2846 def _writexunit(result, outf):
2850 2847 # See http://llg.cubic.org/docs/junit/ for a reference.
2851 2848 timesd = {t[0]: t[3] for t in result.times}
2852 2849 doc = minidom.Document()
2853 2850 s = doc.createElement('testsuite')
2854 2851 s.setAttribute('errors', "0") # TODO
2855 2852 s.setAttribute('failures', str(len(result.failures)))
2856 2853 s.setAttribute('name', 'run-tests')
2857 2854 s.setAttribute(
2858 2855 'skipped', str(len(result.skipped) + len(result.ignored))
2859 2856 )
2860 2857 s.setAttribute('tests', str(result.testsRun))
2861 2858 doc.appendChild(s)
2862 2859 for tc in result.successes:
2863 2860 t = doc.createElement('testcase')
2864 2861 t.setAttribute('name', tc.name)
2865 2862 tctime = timesd.get(tc.name)
2866 2863 if tctime is not None:
2867 2864 t.setAttribute('time', '%.3f' % tctime)
2868 2865 s.appendChild(t)
2869 2866 for tc, err in sorted(result.faildata.items()):
2870 2867 t = doc.createElement('testcase')
2871 2868 t.setAttribute('name', tc)
2872 2869 tctime = timesd.get(tc)
2873 2870 if tctime is not None:
2874 2871 t.setAttribute('time', '%.3f' % tctime)
2875 2872 # createCDATASection expects a unicode or it will
2876 2873 # convert using default conversion rules, which will
2877 2874 # fail if string isn't ASCII.
2878 2875 err = cdatasafe(err).decode('utf-8', 'replace')
2879 2876 cd = doc.createCDATASection(err)
2880 2877 # Use 'failure' here instead of 'error' to match errors = 0,
2881 2878 # failures = len(result.failures) in the testsuite element.
2882 2879 failelem = doc.createElement('failure')
2883 2880 failelem.setAttribute('message', 'output changed')
2884 2881 failelem.setAttribute('type', 'output-mismatch')
2885 2882 failelem.appendChild(cd)
2886 2883 t.appendChild(failelem)
2887 2884 s.appendChild(t)
2888 2885 for tc, message in result.skipped:
2889 2886 # According to the schema, 'skipped' has no attributes. So store
2890 2887 # the skip message as a text node instead.
2891 2888 t = doc.createElement('testcase')
2892 2889 t.setAttribute('name', tc.name)
2893 2890 binmessage = message.encode('utf-8')
2894 2891 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2895 2892 cd = doc.createCDATASection(message)
2896 2893 skipelem = doc.createElement('skipped')
2897 2894 skipelem.appendChild(cd)
2898 2895 t.appendChild(skipelem)
2899 2896 s.appendChild(t)
2900 2897 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2901 2898
2902 2899 @staticmethod
2903 2900 def _writejson(result, outf):
2904 2901 timesd = {}
2905 2902 for tdata in result.times:
2906 2903 test = tdata[0]
2907 2904 timesd[test] = tdata[1:]
2908 2905
2909 2906 outcome = {}
2910 2907 groups = [
2911 2908 ('success', ((tc, None) for tc in result.successes)),
2912 2909 ('failure', result.failures),
2913 2910 ('skip', result.skipped),
2914 2911 ]
2915 2912 for res, testcases in groups:
2916 2913 for tc, __ in testcases:
2917 2914 if tc.name in timesd:
2918 2915 diff = result.faildata.get(tc.name, b'')
2919 2916 try:
2920 2917 diff = diff.decode('unicode_escape')
2921 2918 except UnicodeDecodeError as e:
2922 2919 diff = '%r decoding diff, sorry' % e
2923 2920 tres = {
2924 2921 'result': res,
2925 2922 'time': ('%0.3f' % timesd[tc.name][2]),
2926 2923 'cuser': ('%0.3f' % timesd[tc.name][0]),
2927 2924 'csys': ('%0.3f' % timesd[tc.name][1]),
2928 2925 'start': ('%0.3f' % timesd[tc.name][3]),
2929 2926 'end': ('%0.3f' % timesd[tc.name][4]),
2930 2927 'diff': diff,
2931 2928 }
2932 2929 else:
2933 2930 # blacklisted test
2934 2931 tres = {'result': res}
2935 2932
2936 2933 outcome[tc.name] = tres
2937 2934 jsonout = json.dumps(
2938 2935 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2939 2936 )
2940 2937 outf.writelines(("testreport =", jsonout))
2941 2938
2942 2939
2943 2940 def sorttests(testdescs, previoustimes, shuffle=False):
2944 2941 """Do an in-place sort of tests."""
2945 2942 if shuffle:
2946 2943 random.shuffle(testdescs)
2947 2944 return
2948 2945
2949 2946 if previoustimes:
2950 2947
2951 2948 def sortkey(f):
2952 2949 f = f['path']
2953 2950 if f in previoustimes:
2954 2951 # Use most recent time as estimate
2955 2952 return -(previoustimes[f][-1])
2956 2953 else:
2957 2954 # Default to a rather arbitrary value of 1 second for new tests
2958 2955 return -1.0
2959 2956
2960 2957 else:
2961 2958 # keywords for slow tests
2962 2959 slow = {
2963 2960 b'svn': 10,
2964 2961 b'cvs': 10,
2965 2962 b'hghave': 10,
2966 2963 b'largefiles-update': 10,
2967 2964 b'run-tests': 10,
2968 2965 b'corruption': 10,
2969 2966 b'race': 10,
2970 2967 b'i18n': 10,
2971 2968 b'check': 100,
2972 2969 b'gendoc': 100,
2973 2970 b'contrib-perf': 200,
2974 2971 b'merge-combination': 100,
2975 2972 }
2976 2973 perf = {}
2977 2974
2978 2975 def sortkey(f):
2979 2976 # run largest tests first, as they tend to take the longest
2980 2977 f = f['path']
2981 2978 try:
2982 2979 return perf[f]
2983 2980 except KeyError:
2984 2981 try:
2985 2982 val = -os.stat(f).st_size
2986 2983 except OSError as e:
2987 2984 if e.errno != errno.ENOENT:
2988 2985 raise
2989 2986 perf[f] = -1e9 # file does not exist, tell early
2990 2987 return -1e9
2991 2988 for kw, mul in slow.items():
2992 2989 if kw in f:
2993 2990 val *= mul
2994 2991 if f.endswith(b'.py'):
2995 2992 val /= 10.0
2996 2993 perf[f] = val / 1000.0
2997 2994 return perf[f]
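# Example (hypothetical file): a 4000-byte test-check-commit.t gets
# val = -4000, is multiplied by the b'check' weight of 100 and cached as
# perf[f] = -400.0, so it sorts ahead of smaller, unweighted tests.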
2998 2995
2999 2996 testdescs.sort(key=sortkey)
3000 2997
3001 2998
3002 2999 class TestRunner:
3003 3000 """Holds context for executing tests.
3004 3001
3005 3002 Tests rely on a lot of state. This object holds it for them.
3006 3003 """
3007 3004
3008 3005 # Programs required to run tests.
3009 3006 REQUIREDTOOLS = [
3010 3007 b'diff',
3011 3008 b'grep',
3012 3009 b'unzip',
3013 3010 b'gunzip',
3014 3011 b'bunzip2',
3015 3012 b'sed',
3016 3013 ]
3017 3014
3018 3015 # Maps file extensions to test class.
3019 3016 TESTTYPES = [
3020 3017 (b'.py', PythonTest),
3021 3018 (b'.t', TTest),
3022 3019 ]
3023 3020
3024 3021 def __init__(self):
3025 3022 self.options = None
3026 3023 self._hgroot = None
3027 3024 self._testdir = None
3028 3025 self._outputdir = None
3029 3026 self._hgtmp = None
3030 3027 self._installdir = None
3031 3028 self._bindir = None
3032 3029 # a place for run-tests.py to generate executable it needs
3033 3030 self._custom_bin_dir = None
3034 3031 self._pythondir = None
3035 3032 # True if we had to infer the pythondir from --with-hg
3036 3033 self._pythondir_inferred = False
3037 3034 self._coveragefile = None
3038 3035 self._createdfiles = []
3039 3036 self._hgcommand = None
3040 3037 self._hgpath = None
3041 3038 self._portoffset = 0
3042 3039 self._ports = {}
3043 3040
3044 3041 def run(self, args, parser=None):
3045 3042 """Run the test suite."""
3046 3043 oldmask = os.umask(0o22)
3047 3044 try:
3048 3045 parser = parser or getparser()
3049 3046 options = parseargs(args, parser)
3050 3047 tests = [_sys2bytes(a) for a in options.tests]
3051 3048 if options.test_list is not None:
3052 3049 for listfile in options.test_list:
3053 3050 with open(listfile, 'rb') as f:
3054 3051 tests.extend(t for t in f.read().splitlines() if t)
3055 3052 self.options = options
3056 3053
3057 3054 self._checktools()
3058 3055 testdescs = self.findtests(tests)
3059 3056 if options.profile_runner:
3060 3057 import statprof
3061 3058
3062 3059 statprof.start()
3063 3060 result = self._run(testdescs)
3064 3061 if options.profile_runner:
3065 3062 statprof.stop()
3066 3063 statprof.display()
3067 3064 return result
3068 3065
3069 3066 finally:
3070 3067 os.umask(oldmask)
3071 3068
3072 3069 def _run(self, testdescs):
3073 3070 testdir = getcwdb()
3074 3071 # assume all tests in same folder for now
3075 3072 if testdescs:
3076 3073 pathname = os.path.dirname(testdescs[0]['path'])
3077 3074 if pathname:
3078 3075 testdir = os.path.join(testdir, pathname)
3079 3076 self._testdir = osenvironb[b'TESTDIR'] = testdir
3080 3077 osenvironb[b'TESTDIR_FORWARD_SLASH'] = osenvironb[b'TESTDIR'].replace(
3081 3078 os.sep.encode('ascii'), b'/'
3082 3079 )
3083 3080
3084 3081 if self.options.outputdir:
3085 3082 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3086 3083 else:
3087 3084 self._outputdir = getcwdb()
3088 3085 if testdescs and pathname:
3089 3086 self._outputdir = os.path.join(self._outputdir, pathname)
3090 3087 previoustimes = {}
3091 3088 if self.options.order_by_runtime:
3092 3089 previoustimes = dict(loadtimes(self._outputdir))
3093 3090 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3094 3091
3095 3092 if 'PYTHONHASHSEED' not in os.environ:
3096 3093 # use a random python hash seed all the time
3097 3094 # we do the randomness ourself to know what seed is used
3098 3095 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3099 3096
3100 3097 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3101 3098 # by default, causing thrashing on high-cpu-count systems.
3102 3099 # Setting its limit to 3 during tests should still let us uncover
3103 3100 # multi-threading bugs while keeping the thrashing reasonable.
3104 3101 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3105 3102
3106 3103 if self.options.tmpdir:
3107 3104 self.options.keep_tmpdir = True
3108 3105 tmpdir = _sys2bytes(self.options.tmpdir)
3109 3106 if os.path.exists(tmpdir):
3110 3107 # Meaning of tmpdir has changed since 1.3: we used to create
3111 3108 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3112 3109 # tmpdir already exists.
3113 3110 print("error: temp dir %r already exists" % tmpdir)
3114 3111 return 1
3115 3112
3116 3113 os.makedirs(tmpdir)
3117 3114 else:
3118 3115 d = None
3119 3116 if WINDOWS:
3120 3117 # without this, we get the default temp dir location, but
3121 3118 # in all lowercase, which causes trouble with paths (issue3490)
3122 3119 d = osenvironb.get(b'TMP', None)
3123 3120 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3124 3121
3125 3122 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3126 3123
3127 3124 self._custom_bin_dir = os.path.join(self._hgtmp, b'custom-bin')
3128 3125 os.makedirs(self._custom_bin_dir)
3129 3126
3130 3127 if self.options.with_hg:
3131 3128 self._installdir = None
3132 3129 whg = self.options.with_hg
3133 3130 self._bindir = os.path.dirname(os.path.realpath(whg))
3134 3131 assert isinstance(self._bindir, bytes)
3135 3132 self._hgcommand = os.path.basename(whg)
3136 3133
3137 3134 normbin = os.path.normpath(os.path.abspath(whg))
3138 3135 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3139 3136
3140 3137 # Other Python scripts in the test harness need to
3141 3138 # `import mercurial`. If `hg` is a Python script, we assume
3142 3139 # the Mercurial modules are relative to its path and tell the tests
3143 3140 # to load Python modules from its directory.
3144 3141 with open(whg, 'rb') as fh:
3145 3142 initial = fh.read(1024)
3146 3143
3147 3144 if re.match(b'#!.*python', initial):
3148 3145 self._pythondir = self._bindir
3149 3146 # If it looks like our in-repo Rust binary, use the source root.
3150 3147 # This is a bit hacky. But rhg is still not supported outside the
3151 3148 # source directory. So until it is, do the simple thing.
3152 3149 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3153 3150 self._pythondir = os.path.dirname(self._testdir)
3154 3151 # Fall back to the legacy behavior.
3155 3152 else:
3156 3153 self._pythondir = self._bindir
3157 3154 self._pythondir_inferred = True
3158 3155
3159 3156 else:
3160 3157 self._installdir = os.path.join(self._hgtmp, b"install")
3161 3158 self._bindir = os.path.join(self._installdir, b"bin")
3162 3159 self._hgcommand = b'hg'
3163 3160 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3164 3161
3165 3162 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3166 3163 # a python script and feed it to python.exe. Legacy stdio is force
3167 3164 # enabled by hg.exe, and this is a more realistic way to launch hg
3168 3165 # anyway.
3169 3166 if WINDOWS and not self._hgcommand.endswith(b'.exe'):
3170 3167 self._hgcommand += b'.exe'
3171 3168
3172 3169 real_hg = os.path.join(self._bindir, self._hgcommand)
3173 3170 osenvironb[b'HGTEST_REAL_HG'] = real_hg
3174 3171 # set CHGHG, then replace "hg" command by "chg"
3175 3172 chgbindir = self._bindir
3176 3173 if self.options.chg or self.options.with_chg:
3177 3174 osenvironb[b'CHG_INSTALLED_AS_HG'] = b'1'
3178 3175 osenvironb[b'CHGHG'] = real_hg
3179 3176 else:
3180 3177 # drop flag for hghave
3181 3178 osenvironb.pop(b'CHG_INSTALLED_AS_HG', None)
3182 3179 if self.options.chg:
3183 3180 self._hgcommand = b'chg'
3184 3181 elif self.options.with_chg:
3185 3182 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3186 3183 self._hgcommand = os.path.basename(self.options.with_chg)
3187 3184
3188 3185 # configure fallback and replace "hg" command by "rhg"
3189 3186 rhgbindir = self._bindir
3190 3187 if self.options.rhg or self.options.with_rhg:
3191 3188 # Affects hghave.py
3192 3189 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3193 3190 # Affects configuration. Alternatives would be setting configuration through
3194 3191 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3195 3192 # `--config` but that disrupts tests that print command lines and check expected
3196 3193 # output.
3197 3194 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3198 3195 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = real_hg
3199 3196 else:
3200 3197 # drop flag for hghave
3201 3198 osenvironb.pop(b'RHG_INSTALLED_AS_HG', None)
3202 3199 if self.options.rhg:
3203 3200 self._hgcommand = b'rhg'
3204 3201 elif self.options.with_rhg:
3205 3202 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3206 3203 self._hgcommand = os.path.basename(self.options.with_rhg)
3207 3204
3208 3205 if self.options.pyoxidized:
3209 3206 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
3210 3207 reporootdir = os.path.dirname(testdir)
3211 3208 # XXX we should ideally install stuff instead of using the local build
3212 3209 bin_path = (
3213 3210 b'build/pyoxidizer/x86_64-pc-windows-msvc/release/app/hg.exe'
3214 3211 )
3215 3212 full_path = os.path.join(reporootdir, bin_path)
3216 3213 self._hgcommand = full_path
3217 3214 # Affects hghave.py
3218 3215 osenvironb[b'PYOXIDIZED_INSTALLED_AS_HG'] = b'1'
3219 3216 else:
3220 3217 osenvironb.pop(b'PYOXIDIZED_INSTALLED_AS_HG', None)
3221 3218
3222 3219 osenvironb[b"BINDIR"] = self._bindir
3223 3220 osenvironb[b"PYTHON"] = PYTHON
3224 3221
3225 3222 fileb = _sys2bytes(__file__)
3226 3223 runtestdir = os.path.abspath(os.path.dirname(fileb))
3227 3224 osenvironb[b'RUNTESTDIR'] = runtestdir
3228 3225 osenvironb[b'RUNTESTDIR_FORWARD_SLASH'] = runtestdir.replace(
3229 3226 os.sep.encode('ascii'), b'/'
3230 3227 )
3231 3228 sepb = _sys2bytes(os.pathsep)
3232 3229 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3233 3230 if os.path.islink(__file__):
3234 3231 # test helper will likely be at the end of the symlink
3235 3232 realfile = os.path.realpath(fileb)
3236 3233 realdir = os.path.abspath(os.path.dirname(realfile))
3237 3234 path.insert(2, realdir)
3238 3235 if chgbindir != self._bindir:
3239 3236 path.insert(1, chgbindir)
3240 3237 if rhgbindir != self._bindir:
3241 3238 path.insert(1, rhgbindir)
3242 3239 if self._testdir != runtestdir:
3243 3240 path = [self._testdir] + path
3244 3241 path = [self._custom_bin_dir] + path
3245 3242 osenvironb[b"PATH"] = sepb.join(path)
3246 3243
3247 3244 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3248 3245 # can run .../tests/run-tests.py test-foo where test-foo
3249 3246 # adds an extension to HGRC. Also include the run-tests.py directory to
3250 3247 # import modules like heredoctest.
3251 3248 pypath = [self._pythondir, self._testdir, runtestdir]
3252 3249 # We have to augment PYTHONPATH, rather than simply replacing
3253 3250 # it, in case external libraries are only available via current
3254 3251 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3255 3252 # are in /opt/subversion.)
3256 3253 oldpypath = osenvironb.get(IMPL_PATH)
3257 3254 if oldpypath:
3258 3255 pypath.append(oldpypath)
3259 3256 osenvironb[IMPL_PATH] = sepb.join(pypath)
3260 3257
3261 3258 if self.options.pure:
3262 3259 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3263 3260 os.environ["HGMODULEPOLICY"] = "py"
3264 3261 if self.options.rust:
3265 3262 os.environ["HGMODULEPOLICY"] = "rust+c"
3266 3263 if self.options.no_rust:
3267 3264 current_policy = os.environ.get("HGMODULEPOLICY", "")
3268 3265 if current_policy.startswith("rust+"):
3269 3266 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3270 3267 os.environ.pop("HGWITHRUSTEXT", None)
3271 3268
3272 3269 if self.options.allow_slow_tests:
3273 3270 os.environ["HGTEST_SLOW"] = "slow"
3274 3271 elif 'HGTEST_SLOW' in os.environ:
3275 3272 del os.environ['HGTEST_SLOW']
3276 3273
3277 3274 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3278 3275
3279 3276 if self.options.exceptions:
3280 3277 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3281 3278 try:
3282 3279 os.makedirs(exceptionsdir)
3283 3280 except OSError as e:
3284 3281 if e.errno != errno.EEXIST:
3285 3282 raise
3286 3283
3287 3284 # Remove all existing exception reports.
3288 3285 for f in os.listdir(exceptionsdir):
3289 3286 os.unlink(os.path.join(exceptionsdir, f))
3290 3287
3291 3288 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3292 3289 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3293 3290 self.options.extra_config_opt.append(
3294 3291 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3295 3292 )
3296 3293
3297 3294 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3298 3295 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3299 3296 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3300 3297 vlog("# Using PATH", os.environ["PATH"])
3301 3298 vlog(
3302 3299 "# Using",
3303 3300 _bytes2sys(IMPL_PATH),
3304 3301 _bytes2sys(osenvironb[IMPL_PATH]),
3305 3302 )
3306 3303 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3307 3304
3308 3305 try:
3309 3306 return self._runtests(testdescs) or 0
3310 3307 finally:
3311 3308 time.sleep(0.1)
3312 3309 self._cleanup()
3313 3310
3314 3311 def findtests(self, args):
3315 3312 """Finds possible test files from arguments.
3316 3313
3317 3314 If you wish to inject custom tests into the test harness, this would
3318 3315 be a good function to monkeypatch or override in a derived class.
3319 3316 """
3320 3317 if not args:
3321 3318 if self.options.changed:
3322 3319 proc = Popen4(
3323 3320 b'hg st --rev "%s" -man0 .'
3324 3321 % _sys2bytes(self.options.changed),
3325 3322 None,
3326 3323 0,
3327 3324 )
3328 3325 stdout, stderr = proc.communicate()
3329 3326 args = stdout.strip(b'\0').split(b'\0')
3330 3327 else:
3331 3328 args = os.listdir(b'.')
3332 3329
3333 3330 expanded_args = []
3334 3331 for arg in args:
3335 3332 if os.path.isdir(arg):
3336 3333 if not arg.endswith(b'/'):
3337 3334 arg += b'/'
3338 3335 expanded_args.extend([arg + a for a in os.listdir(arg)])
3339 3336 else:
3340 3337 expanded_args.append(arg)
3341 3338 args = expanded_args
3342 3339
3343 3340 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3344 3341 tests = []
3345 3342 for t in args:
3346 3343 case = []
3347 3344
3348 3345 if not (
3349 3346 os.path.basename(t).startswith(b'test-')
3350 3347 and (t.endswith(b'.py') or t.endswith(b'.t'))
3351 3348 ):
3352 3349
3353 3350 m = testcasepattern.match(os.path.basename(t))
3354 3351 if m is not None:
3355 3352 t_basename, casestr = m.groups()
3356 3353 t = os.path.join(os.path.dirname(t), t_basename)
3357 3354 if casestr:
3358 3355 case = casestr.split(b'#')
3359 3356 else:
3360 3357 continue
3361 3358
3362 3359 if t.endswith(b'.t'):
3363 3360 # .t file may contain multiple test cases
3364 3361 casedimensions = parsettestcases(t)
3365 3362 if casedimensions:
3366 3363 cases = []
3367 3364
3368 3365 def addcases(case, casedimensions):
3369 3366 if not casedimensions:
3370 3367 cases.append(case)
3371 3368 else:
3372 3369 for c in casedimensions[0]:
3373 3370 addcases(case + [c], casedimensions[1:])
3374 3371
3375 3372 addcases([], casedimensions)
3376 3373 if case and case in cases:
3377 3374 cases = [case]
3378 3375 elif case:
3379 3376 # Ignore invalid cases
3380 3377 cases = []
3381 3378 else:
3382 3379 pass
3383 3380 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3384 3381 else:
3385 3382 tests.append({'path': t})
3386 3383 else:
3387 3384 tests.append({'path': t})
3388 3385
3389 3386 if self.options.retest:
3390 3387 retest_args = []
3391 3388 for test in tests:
3392 3389 errpath = self._geterrpath(test)
3393 3390 if os.path.exists(errpath):
3394 3391 retest_args.append(test)
3395 3392 tests = retest_args
3396 3393 return tests
3397 3394
3398 3395 def _runtests(self, testdescs):
3399 3396 def _reloadtest(test, i):
3400 3397 # convert a test back to its description dict
3401 3398 desc = {'path': test.path}
3402 3399 case = getattr(test, '_case', [])
3403 3400 if case:
3404 3401 desc['case'] = case
3405 3402 return self._gettest(desc, i)
3406 3403
3407 3404 try:
3408 3405 if self.options.restart:
3409 3406 orig = list(testdescs)
3410 3407 while testdescs:
3411 3408 desc = testdescs[0]
3412 3409 errpath = self._geterrpath(desc)
3413 3410 if os.path.exists(errpath):
3414 3411 break
3415 3412 testdescs.pop(0)
3416 3413 if not testdescs:
3417 3414 print("running all tests")
3418 3415 testdescs = orig
3419 3416
3420 3417 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3421 3418 num_tests = len(tests) * self.options.runs_per_test
3422 3419
3423 3420 jobs = min(num_tests, self.options.jobs)
3424 3421
3425 3422 failed = False
3426 3423 kws = self.options.keywords
3427 3424 if kws is not None:
3428 3425 kws = kws.encode('utf-8')
3429 3426
3430 3427 suite = TestSuite(
3431 3428 self._testdir,
3432 3429 jobs=jobs,
3433 3430 whitelist=self.options.whitelisted,
3434 3431 blacklist=self.options.blacklist,
3435 3432 keywords=kws,
3436 3433 loop=self.options.loop,
3437 3434 runs_per_test=self.options.runs_per_test,
3438 3435 showchannels=self.options.showchannels,
3439 3436 tests=tests,
3440 3437 loadtest=_reloadtest,
3441 3438 )
3442 3439 verbosity = 1
3443 3440 if self.options.list_tests:
3444 3441 verbosity = 0
3445 3442 elif self.options.verbose:
3446 3443 verbosity = 2
3447 3444 runner = TextTestRunner(self, verbosity=verbosity)
3448 3445
3449 3446 if self.options.list_tests:
3450 3447 result = runner.listtests(suite)
3451 3448 else:
3452 3449 self._usecorrectpython()
3453 3450 if self._installdir:
3454 3451 self._installhg()
3455 3452 self._checkhglib("Testing")
3456 3453 if self.options.chg:
3457 3454 assert self._installdir
3458 3455 self._installchg()
3459 3456 if self.options.rhg:
3460 3457 assert self._installdir
3461 3458 self._installrhg()
3462 3459 elif self.options.pyoxidized:
3463 3460 self._build_pyoxidized()
3464 3461 self._use_correct_mercurial()
3465 3462
3466 3463 log(
3467 3464 'running %d tests using %d parallel processes'
3468 3465 % (num_tests, jobs)
3469 3466 )
3470 3467
3471 3468 result = runner.run(suite)
3472 3469
3473 3470 if result.failures or result.errors:
3474 3471 failed = True
3475 3472
3476 3473 result.onEnd()
3477 3474
3478 3475 if self.options.anycoverage:
3479 3476 self._outputcoverage()
3480 3477 except KeyboardInterrupt:
3481 3478 failed = True
3482 3479 print("\ninterrupted!")
3483 3480
3484 3481 if failed:
3485 3482 return 1
3486 3483
3487 3484 def _geterrpath(self, test):
3488 3485 # test['path'] is a relative path
3489 3486 if 'case' in test:
3490 3487 # for multiple dimensions test cases
3491 3488 casestr = b'#'.join(test['case'])
3492 3489 errpath = b'%s#%s.err' % (test['path'], casestr)
3493 3490 else:
3494 3491 errpath = b'%s.err' % test['path']
3495 3492 if self.options.outputdir:
3496 3493 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3497 3494 errpath = os.path.join(self._outputdir, errpath)
3498 3495 return errpath
3499 3496
3500 3497 def _getport(self, count):
3501 3498 port = self._ports.get(count) # do we have a cached entry?
3502 3499 if port is None:
3503 3500 portneeded = 3
3504 3501 # above 100 tries we just give up and let the test report the failure
3505 for tries in xrange(100):
3502 for tries in range(100):
3506 3503 allfree = True
3507 3504 port = self.options.port + self._portoffset
3508 for idx in xrange(portneeded):
3505 for idx in range(portneeded):
3509 3506 if not checkportisavailable(port + idx):
3510 3507 allfree = False
3511 3508 break
3512 3509 self._portoffset += portneeded
3513 3510 if allfree:
3514 3511 break
3515 3512 self._ports[count] = port
3516 3513 return port
3517 3514
3518 3515 def _gettest(self, testdesc, count):
3519 3516 """Obtain a Test by looking at its filename.
3520 3517
3521 3518 Returns a Test instance. The Test may not be runnable if it doesn't
3522 3519 map to a known type.
3523 3520 """
3524 3521 path = testdesc['path']
3525 3522 lctest = path.lower()
3526 3523 testcls = Test
3527 3524
3528 3525 for ext, cls in self.TESTTYPES:
3529 3526 if lctest.endswith(ext):
3530 3527 testcls = cls
3531 3528 break
3532 3529
3533 3530 refpath = os.path.join(getcwdb(), path)
3534 3531 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3535 3532
3536 3533 # extra keyword parameters. 'case' is used by .t tests
3537 3534 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3538 3535
3539 3536 t = testcls(
3540 3537 refpath,
3541 3538 self._outputdir,
3542 3539 tmpdir,
3543 3540 keeptmpdir=self.options.keep_tmpdir,
3544 3541 debug=self.options.debug,
3545 3542 first=self.options.first,
3546 3543 timeout=self.options.timeout,
3547 3544 startport=self._getport(count),
3548 3545 extraconfigopts=self.options.extra_config_opt,
3549 3546 shell=self.options.shell,
3550 3547 hgcommand=self._hgcommand,
3551 3548 usechg=bool(self.options.with_chg or self.options.chg),
3552 3549 chgdebug=self.options.chg_debug,
3553 3550 useipv6=useipv6,
3554 3551 **kwds
3555 3552 )
3556 3553 t.should_reload = True
3557 3554 return t
3558 3555
3559 3556 def _cleanup(self):
3560 3557 """Clean up state from this test invocation."""
3561 3558 if self.options.keep_tmpdir:
3562 3559 return
3563 3560
3564 3561 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3565 3562 shutil.rmtree(self._hgtmp, True)
3566 3563 for f in self._createdfiles:
3567 3564 try:
3568 3565 os.remove(f)
3569 3566 except OSError:
3570 3567 pass
3571 3568
3572 3569 def _usecorrectpython(self):
3573 3570 """Configure the environment to use the appropriate Python in tests."""
3574 3571 # Tests must use the same interpreter as us or bad things will happen.
3575 3572 if WINDOWS:
3576 3573 pyexe_names = [b'python', b'python3', b'python.exe']
3577 3574 else:
3578 3575 pyexe_names = [b'python', b'python3']
3579 3576
3580 3577 # os.symlink() is available with py3 on Windows, but it requires
3581 3578 # Administrator rights.
3582 3579 if not WINDOWS and getattr(os, 'symlink', None):
3583 3580 msg = "# Making python executable in test path a symlink to '%s'"
3584 3581 msg %= sysexecutable
3585 3582 vlog(msg)
3586 3583 for pyexename in pyexe_names:
3587 3584 mypython = os.path.join(self._custom_bin_dir, pyexename)
3588 3585 try:
3589 3586 if os.readlink(mypython) == sysexecutable:
3590 3587 continue
3591 3588 os.unlink(mypython)
3592 3589 except OSError as err:
3593 3590 if err.errno != errno.ENOENT:
3594 3591 raise
3595 3592 if self._findprogram(pyexename) != sysexecutable:
3596 3593 try:
3597 3594 os.symlink(sysexecutable, mypython)
3598 3595 self._createdfiles.append(mypython)
3599 3596 except OSError as err:
3600 3597 # child processes may race, which is harmless
3601 3598 if err.errno != errno.EEXIST:
3602 3599 raise
3603 3600 elif WINDOWS and not os.getenv('MSYSTEM'):
3604 3601 raise AssertionError('cannot run test on Windows without MSYSTEM')
3605 3602 else:
3606 3603 # Generate explicit file instead of symlink
3607 3604 #
3608 3605 # This is especially important as Windows doesn't have
3609 3606 # `python3.exe`, and MSYS cannot understand the reparse point with
3610 3607 # that name provided by Microsoft. Create a simple script on PATH
3611 3608 # with that name that delegates to the py3 launcher so the shebang
3612 3609 # lines work.
3613 3610 esc_executable = _sys2bytes(shellquote(sysexecutable))
3614 3611 for pyexename in pyexe_names:
3615 3612 stub_exec_path = os.path.join(self._custom_bin_dir, pyexename)
3616 3613 with open(stub_exec_path, 'wb') as f:
3617 3614 f.write(b'#!/bin/sh\n')
3618 3615 f.write(b'%s "$@"\n' % esc_executable)
3619 3616
3620 3617 if WINDOWS:
3621 3618 # adjust the path to make sure the main python finds its own dll
3622 3619 path = os.environ['PATH'].split(os.pathsep)
3623 3620 main_exec_dir = os.path.dirname(sysexecutable)
3624 3621 extra_paths = [_bytes2sys(self._custom_bin_dir), main_exec_dir]
3625 3622
3626 3623 # Binaries installed by pip into the user area like pylint.exe may
3627 3624 # not be in PATH by default.
3628 3625 appdata = os.environ.get('APPDATA')
3629 3626 vi = sys.version_info
3630 3627 if appdata is not None:
3631 3628 python_dir = 'Python%d%d' % (vi[0], vi[1])
3632 3629 scripts_path = [appdata, 'Python', python_dir, 'Scripts']
3633 3630 scripts_dir = os.path.join(*scripts_path)
3634 3631 extra_paths.append(scripts_dir)
3635 3632
3636 3633 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3637 3634
3638 3635 def _use_correct_mercurial(self):
3639 3636 target_exec = os.path.join(self._custom_bin_dir, b'hg')
3640 3637 if self._hgcommand != b'hg':
3641 3638 # shutil.which only accepts bytes from 3.8
3642 3639 real_exec = which(self._hgcommand)
3643 3640 if real_exec is None:
3644 3641 raise ValueError('could not find exec path for %r' % (self._hgcommand,))
3645 3642 if real_exec == target_exec:
3646 3643 # do not overwrite something with itself
3647 3644 return
3648 3645 if WINDOWS:
3649 3646 with open(target_exec, 'wb') as f:
3650 3647 f.write(b'#!/bin/sh\n')
3651 3648 escaped_exec = shellquote(_bytes2sys(real_exec))
3652 3649 f.write(b'%s "$@"\n' % _sys2bytes(escaped_exec))
3653 3650 else:
3654 3651 os.symlink(real_exec, target_exec)
3655 3652 self._createdfiles.append(target_exec)
3656 3653
3657 3654 def _installhg(self):
3658 3655 """Install hg into the test environment.
3659 3656
3660 3657 This will also configure hg with the appropriate testing settings.
3661 3658 """
3662 3659 vlog("# Performing temporary installation of HG")
3663 3660 installerrs = os.path.join(self._hgtmp, b"install.err")
3664 3661 compiler = ''
3665 3662 if self.options.compiler:
3666 3663 compiler = '--compiler ' + self.options.compiler
3667 3664 setup_opts = b""
3668 3665 if self.options.pure:
3669 3666 setup_opts = b"--pure"
3670 3667 elif self.options.rust:
3671 3668 setup_opts = b"--rust"
3672 3669 elif self.options.no_rust:
3673 3670 setup_opts = b"--no-rust"
3674 3671
3675 3672 # Run installer in hg root
3676 3673 compiler = _sys2bytes(compiler)
3677 3674 script = _sys2bytes(os.path.realpath(sys.argv[0]))
3678 3675 exe = _sys2bytes(sysexecutable)
3679 3676 hgroot = os.path.dirname(os.path.dirname(script))
3680 3677 self._hgroot = hgroot
3681 3678 os.chdir(hgroot)
3682 3679 nohome = b'--home=""'
3683 3680 if WINDOWS:
3684 3681 # The --home="" trick works only on OSes where os.sep == '/'
3685 3682 # because of a distutils convert_path() fast-path. Avoid it at
3686 3683 # least on Windows for now, deal with .pydistutils.cfg bugs
3687 3684 # when they happen.
3688 3685 nohome = b''
3689 3686 cmd = (
3690 3687 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3691 3688 b' build %(compiler)s --build-base="%(base)s"'
3692 3689 b' install --force --prefix="%(prefix)s"'
3693 3690 b' --install-lib="%(libdir)s"'
3694 3691 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3695 3692 % {
3696 3693 b'exe': exe,
3697 3694 b'setup_opts': setup_opts,
3698 3695 b'compiler': compiler,
3699 3696 b'base': os.path.join(self._hgtmp, b"build"),
3700 3697 b'prefix': self._installdir,
3701 3698 b'libdir': self._pythondir,
3702 3699 b'bindir': self._bindir,
3703 3700 b'nohome': nohome,
3704 3701 b'logfile': installerrs,
3705 3702 }
3706 3703 )
3707 3704
3708 3705 # setuptools requires install directories to exist.
3709 3706 def makedirs(p):
3710 3707 try:
3711 3708 os.makedirs(p)
3712 3709 except OSError as e:
3713 3710 if e.errno != errno.EEXIST:
3714 3711 raise
3715 3712
3716 3713 makedirs(self._pythondir)
3717 3714 makedirs(self._bindir)
3718 3715
3719 3716 vlog("# Running", cmd.decode("utf-8"))
3720 3717 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3721 3718 if not self.options.verbose:
3722 3719 try:
3723 3720 os.remove(installerrs)
3724 3721 except OSError as e:
3725 3722 if e.errno != errno.ENOENT:
3726 3723 raise
3727 3724 else:
3728 3725 with open(installerrs, 'rb') as f:
3729 3726 for line in f:
3730 3727 sys.stdout.buffer.write(line)
3731 3728 sys.exit(1)
3732 3729 os.chdir(self._testdir)
3733 3730
3734 3731 hgbat = os.path.join(self._bindir, b'hg.bat')
3735 3732 if os.path.isfile(hgbat):
3736 3733 # hg.bat expects to be put in bin/scripts while run-tests.py
3737 3734 # installation layout puts it in bin/ directly. Fix it
3738 3735 with open(hgbat, 'rb') as f:
3739 3736 data = f.read()
3740 3737 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3741 3738 data = data.replace(
3742 3739 br'"%~dp0..\python" "%~dp0hg" %*',
3743 3740 b'"%~dp0python" "%~dp0hg" %*',
3744 3741 )
3745 3742 with open(hgbat, 'wb') as f:
3746 3743 f.write(data)
3747 3744 else:
3748 3745 print('WARNING: cannot fix hg.bat reference to python.exe')
3749 3746
3750 3747 if self.options.anycoverage:
3751 3748 custom = os.path.join(
3752 3749 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3753 3750 )
3754 3751 target = os.path.join(self._pythondir, b'sitecustomize.py')
3755 3752 vlog('# Installing coverage trigger to %s' % target)
3756 3753 shutil.copyfile(custom, target)
3757 3754 rc = os.path.join(self._testdir, b'.coveragerc')
3758 3755 vlog('# Installing coverage rc to %s' % rc)
3759 3756 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3760 3757 covdir = os.path.join(self._installdir, b'..', b'coverage')
3761 3758 try:
3762 3759 os.mkdir(covdir)
3763 3760 except OSError as e:
3764 3761 if e.errno != errno.EEXIST:
3765 3762 raise
3766 3763
3767 3764 osenvironb[b'COVERAGE_DIR'] = covdir
3768 3765
3769 3766 def _checkhglib(self, verb):
3770 3767 """Ensure that the 'mercurial' package imported by python is
3771 3768 the one we expect it to be. If not, print a warning to stderr."""
3772 3769 if self._pythondir_inferred:
3773 3770 # The pythondir has been inferred from --with-hg flag.
3774 3771 # We cannot expect anything sensible here.
3775 3772 return
3776 3773 expecthg = os.path.join(self._pythondir, b'mercurial')
3777 3774 actualhg = self._gethgpath()
3778 3775 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3779 3776 sys.stderr.write(
3780 3777 'warning: %s with unexpected mercurial lib: %s\n'
3781 3778 ' (expected %s)\n' % (verb, actualhg, expecthg)
3782 3779 )
3783 3780
3784 3781 def _gethgpath(self):
3785 3782 """Return the path to the mercurial package that is actually found by
3786 3783 the current Python interpreter."""
3787 3784 if self._hgpath is not None:
3788 3785 return self._hgpath
3789 3786
3790 3787 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3791 3788 cmd = _bytes2sys(cmd % PYTHON)
3792 3789
3793 3790 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3794 3791 out, err = p.communicate()
3795 3792
3796 3793 self._hgpath = out.strip()
3797 3794
3798 3795 return self._hgpath
3799 3796
3800 3797 def _installchg(self):
3801 3798 """Install chg into the test environment"""
3802 3799 vlog('# Performing temporary installation of CHG')
3803 3800 assert os.path.dirname(self._bindir) == self._installdir
3804 3801 assert self._hgroot, 'must be called after _installhg()'
3805 3802 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3806 3803 b'make': b'make', # TODO: switch by option or environment?
3807 3804 b'prefix': self._installdir,
3808 3805 }
3809 3806 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3810 3807 vlog("# Running", cmd)
3811 3808 proc = subprocess.Popen(
3812 3809 cmd,
3813 3810 shell=True,
3814 3811 cwd=cwd,
3815 3812 stdin=subprocess.PIPE,
3816 3813 stdout=subprocess.PIPE,
3817 3814 stderr=subprocess.STDOUT,
3818 3815 )
3819 3816 out, _err = proc.communicate()
3820 3817 if proc.returncode != 0:
3821 3818 sys.stdout.buffer.write(out)
3822 3819 sys.exit(1)
3823 3820
3824 3821 def _installrhg(self):
3825 3822 """Install rhg into the test environment"""
3826 3823 vlog('# Performing temporary installation of rhg')
3827 3824 assert os.path.dirname(self._bindir) == self._installdir
3828 3825 assert self._hgroot, 'must be called after _installhg()'
3829 3826 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3830 3827 b'make': b'make', # TODO: switch by option or environment?
3831 3828 b'prefix': self._installdir,
3832 3829 }
3833 3830 cwd = self._hgroot
3834 3831 vlog("# Running", cmd)
3835 3832 proc = subprocess.Popen(
3836 3833 cmd,
3837 3834 shell=True,
3838 3835 cwd=cwd,
3839 3836 stdin=subprocess.PIPE,
3840 3837 stdout=subprocess.PIPE,
3841 3838 stderr=subprocess.STDOUT,
3842 3839 )
3843 3840 out, _err = proc.communicate()
3844 3841 if proc.returncode != 0:
3845 3842 sys.stdout.buffer.write(out)
3846 3843 sys.exit(1)
3847 3844
3848 3845 def _build_pyoxidized(self):
3849 3846 """build a pyoxidized version of mercurial into the test environment
3850 3847
3851 3848 Ideally this function would be `install_pyoxidizer` and would both build
3852 3849 and install pyoxidizer. However, we are starting small to get the
3853 3850 pyoxidizer-built binary into testing quickly.
3854 3851 """
3855 3852 vlog('# build a pyoxidized version of Mercurial')
3856 3853 assert os.path.dirname(self._bindir) == self._installdir
3857 3854 assert self._hgroot, 'must be called after _installhg()'
3858 3855 cmd = b'"%(make)s" pyoxidizer-windows-tests' % {
3859 3856 b'make': b'make',
3860 3857 }
3861 3858 cwd = self._hgroot
3862 3859 vlog("# Running", cmd)
3863 3860 proc = subprocess.Popen(
3864 3861 _bytes2sys(cmd),
3865 3862 shell=True,
3866 3863 cwd=_bytes2sys(cwd),
3867 3864 stdin=subprocess.PIPE,
3868 3865 stdout=subprocess.PIPE,
3869 3866 stderr=subprocess.STDOUT,
3870 3867 )
3871 3868 out, _err = proc.communicate()
3872 3869 if proc.returncode != 0:
3873 3870 sys.stdout.buffer.write(out)
3874 3871 sys.exit(1)
3875 3872
3876 3873 def _outputcoverage(self):
3877 3874 """Produce code coverage output."""
3878 3875 import coverage
3879 3876
3880 3877 coverage = coverage.coverage
3881 3878
3882 3879 vlog('# Producing coverage report')
3883 3880 # chdir is the easiest way to get short, relative paths in the
3884 3881 # output.
3885 3882 os.chdir(self._hgroot)
3886 3883 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3887 3884 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3888 3885
3889 3886 # Map install directory paths back to source directory.
3890 3887 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3891 3888
3892 3889 cov.combine()
3893 3890
3894 3891 omit = [
3895 3892 _bytes2sys(os.path.join(x, b'*'))
3896 3893 for x in [self._bindir, self._testdir]
3897 3894 ]
3898 3895 cov.report(ignore_errors=True, omit=omit)
3899 3896
3900 3897 if self.options.htmlcov:
3901 3898 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3902 3899 cov.html_report(directory=htmldir, omit=omit)
3903 3900 if self.options.annotate:
3904 3901 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3905 3902 if not os.path.isdir(adir):
3906 3903 os.mkdir(adir)
3907 3904 cov.annotate(directory=adir, omit=omit)
3908 3905
3909 3906 def _findprogram(self, program):
3910 3907 """Search PATH for a executable program"""
3911 3908 dpb = _sys2bytes(os.defpath)
3912 3909 sepb = _sys2bytes(os.pathsep)
3913 3910 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3914 3911 name = os.path.join(p, program)
3915 3912 if WINDOWS or os.access(name, os.X_OK):
3916 3913 return _bytes2sys(name)
3917 3914 return None
3918 3915
3919 3916 def _checktools(self):
3920 3917 """Ensure tools required to run tests are present."""
3921 3918 for p in self.REQUIREDTOOLS:
3922 3919 if WINDOWS and not p.endswith(b'.exe'):
3923 3920 p += b'.exe'
3924 3921 found = self._findprogram(p)
3925 3922 p = p.decode("utf-8")
3926 3923 if found:
3927 3924 vlog("# Found prerequisite", p, "at", found)
3928 3925 else:
3929 3926 print("WARNING: Did not find prerequisite tool: %s " % p)
3930 3927
3931 3928
3932 3929 def aggregateexceptions(path):
3933 3930 exceptioncounts = collections.Counter()
3934 3931 testsbyfailure = collections.defaultdict(set)
3935 3932 failuresbytest = collections.defaultdict(set)
3936 3933
3937 3934 for f in os.listdir(path):
3938 3935 with open(os.path.join(path, f), 'rb') as fh:
3939 3936 data = fh.read().split(b'\0')
3940 3937 if len(data) != 5:
3941 3938 continue
3942 3939
3943 3940 exc, mainframe, hgframe, hgline, testname = data
3944 3941 exc = exc.decode('utf-8')
3945 3942 mainframe = mainframe.decode('utf-8')
3946 3943 hgframe = hgframe.decode('utf-8')
3947 3944 hgline = hgline.decode('utf-8')
3948 3945 testname = testname.decode('utf-8')
3949 3946
3950 3947 key = (hgframe, hgline, exc)
3951 3948 exceptioncounts[key] += 1
3952 3949 testsbyfailure[key].add(testname)
3953 3950 failuresbytest[testname].add(key)
3954 3951
3955 3952 # Find the test having the fewest failures for each failure.
3956 3953 leastfailing = {}
3957 3954 for key, tests in testsbyfailure.items():
3958 3955 fewesttest = None
3959 3956 fewestcount = 99999999
3960 3957 for test in sorted(tests):
3961 3958 if len(failuresbytest[test]) < fewestcount:
3962 3959 fewesttest = test
3963 3960 fewestcount = len(failuresbytest[test])
3964 3961
3965 3962 leastfailing[key] = (fewestcount, fewesttest)
3966 3963
3967 3964 # Create a combined counter so we can sort by total occurrences and
3968 3965 # impacted tests.
3969 3966 combined = {}
3970 3967 for key in exceptioncounts:
3971 3968 combined[key] = (
3972 3969 exceptioncounts[key],
3973 3970 len(testsbyfailure[key]),
3974 3971 leastfailing[key][0],
3975 3972 leastfailing[key][1],
3976 3973 )
3977 3974
3978 3975 return {
3979 3976 'exceptioncounts': exceptioncounts,
3980 3977 'total': sum(exceptioncounts.values()),
3981 3978 'combined': combined,
3982 3979 'leastfailing': leastfailing,
3983 3980 'byfailure': testsbyfailure,
3984 3981 'bytest': failuresbytest,
3985 3982 }
3986 3983
3987 3984
3988 3985 if __name__ == '__main__':
3989 3986 if WINDOWS and not os.getenv('MSYSTEM'):
3990 3987 print('cannot run test on Windows without MSYSTEM', file=sys.stderr)
3991 3988 print(
3992 3989 '(if you need to do so contact the mercurial devs: '
3993 3990 'mercurial@mercurial-scm.org)',
3994 3991 file=sys.stderr,
3995 3992 )
3996 3993 sys.exit(255)
3997 3994
3998 3995 runner = TestRunner()
3999 3996
4000 3997 try:
4001 3998 import msvcrt
4002 3999
4003 4000 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
4004 4001 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
4005 4002 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
4006 4003 except ImportError:
4007 4004 pass
4008 4005
4009 4006 sys.exit(runner.run(sys.argv[1:]))
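
For context on the pattern this changeset removes: the files below each carried a small shim aliasing xrange to range so that Python 2-era loops kept working on Python 3. A minimal illustrative sketch of that shim and its py3-only replacement (explanatory only, not part of the commit; the variable names are made up):

import sys

# Compatibility shim of the kind deleted below: Python 3 has no xrange, so it
# was aliased to the (already lazy) range built-in.
if sys.version_info[0] >= 3:
    xrange = range

squares_compat = [i * i for i in xrange(5)]  # runs on Python 2 and 3

# With Python 2 support dropped, the shim disappears and range() is used
# directly, which is the substance of the hunks in this changeset:
squares_py3 = [i * i for i in range(5)]      # [0, 1, 4, 9, 16]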
@@ -1,36 +1,33
1 1 #!/usr/bin/env python3
2 2 #
3 3 # A portable replacement for 'seq'
4 4 #
5 5 # Usage:
6 6 # seq STOP [1, STOP] stepping by 1
7 7 # seq START STOP [START, STOP] stepping by 1
8 8 # seq START STEP STOP [START, STOP] stepping by STEP
9 9
10 10 import os
11 11 import sys
12 12
13 13 try:
14 14 import msvcrt
15 15
16 16 msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
17 17 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
18 18 msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
19 19 except ImportError:
20 20 pass
21 21
22 if sys.version_info[0] >= 3:
23 xrange = range
24
25 22 start = 1
26 23 if len(sys.argv) > 2:
27 24 start = int(sys.argv[1])
28 25
29 26 step = 1
30 27 if len(sys.argv) > 3:
31 28 step = int(sys.argv[2])
32 29
33 30 stop = int(sys.argv[-1]) + 1
34 31
35 for i in xrange(start, stop, step):
32 for i in range(start, stop, step):
36 33 print(i)
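
The argument handling above can be summarized as follows; this helper is an illustrative restatement of the script's behaviour (not part of the changeset) that makes the expected output easy to check:

def seq(*args):
    """Mimic the script: seq(STOP), seq(START, STOP) or seq(START, STEP, STOP)."""
    start, step = 1, 1
    if len(args) == 3:
        start, step, stop = args
    elif len(args) == 2:
        start, stop = args
    else:
        (stop,) = args
    return list(range(start, stop + 1, step))

# seq(5)        -> [1, 2, 3, 4, 5]
# seq(2, 5)     -> [2, 3, 4, 5]
# seq(2, 2, 10) -> [2, 4, 6, 8, 10]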
@@ -1,471 +1,470
1 1 import binascii
2 2 import getopt
3 3 import math
4 4 import os
5 5 import random
6 6 import sys
7 7 import time
8 8
9 9 from mercurial.node import nullrev
10 10 from mercurial import (
11 11 ancestor,
12 12 debugcommands,
13 13 hg,
14 14 pycompat,
15 15 ui as uimod,
16 16 util,
17 17 )
18 18
19 19 if pycompat.ispy3:
20 20 long = int
21 xrange = range
22 21
23 22
24 23 def buildgraph(rng, nodes=100, rootprob=0.05, mergeprob=0.2, prevprob=0.7):
25 24 """nodes: total number of nodes in the graph
26 25 rootprob: probability that a new node (not 0) will be a root
27 26 mergeprob: probability that, excluding a root a node will be a merge
28 27 prevprob: probability that p1 will be the previous node
29 28
30 29 return value is a graph represented as an adjacency list.
31 30 """
32 31 graph = [None] * nodes
33 for i in xrange(nodes):
32 for i in range(nodes):
34 33 if i == 0 or rng.random() < rootprob:
35 34 graph[i] = [nullrev]
36 35 elif i == 1:
37 36 graph[i] = [0]
38 37 elif rng.random() < mergeprob:
39 38 if i == 2 or rng.random() < prevprob:
40 39 # p1 is prev
41 40 p1 = i - 1
42 41 else:
43 42 p1 = rng.randrange(i - 1)
44 43 p2 = rng.choice(list(range(0, p1)) + list(range(p1 + 1, i)))
45 44 graph[i] = [p1, p2]
46 45 elif rng.random() < prevprob:
47 46 graph[i] = [i - 1]
48 47 else:
49 48 graph[i] = [rng.randrange(i - 1)]
50 49
51 50 return graph
52 51
53 52
54 53 def buildancestorsets(graph):
55 54 ancs = [None] * len(graph)
56 for i in xrange(len(graph)):
55 for i in range(len(graph)):
57 56 ancs[i] = {i}
58 57 if graph[i] == [nullrev]:
59 58 continue
60 59 for p in graph[i]:
61 60 ancs[i].update(ancs[p])
62 61 return ancs
63 62
64 63
65 64 class naiveincrementalmissingancestors:
66 65 def __init__(self, ancs, bases):
67 66 self.ancs = ancs
68 67 self.bases = set(bases)
69 68
70 69 def addbases(self, newbases):
71 70 self.bases.update(newbases)
72 71
73 72 def removeancestorsfrom(self, revs):
74 73 for base in self.bases:
75 74 if base != nullrev:
76 75 revs.difference_update(self.ancs[base])
77 76 revs.discard(nullrev)
78 77
79 78 def missingancestors(self, revs):
80 79 res = set()
81 80 for rev in revs:
82 81 if rev != nullrev:
83 82 res.update(self.ancs[rev])
84 83 for base in self.bases:
85 84 if base != nullrev:
86 85 res.difference_update(self.ancs[base])
87 86 return sorted(res)
88 87
89 88
90 89 def test_missingancestors(seed, rng):
91 90 # empirically observed to take around 1 second
92 91 graphcount = 100
93 92 testcount = 10
94 93 inccount = 10
95 94 nerrs = [0]
96 95 # the default mu and sigma give us a nice distribution of mostly
97 96 # single-digit counts (including 0) with some higher ones
98 97 def lognormrandom(mu, sigma):
99 98 return int(math.floor(rng.lognormvariate(mu, sigma)))
100 99
101 100 def samplerevs(nodes, mu=1.1, sigma=0.8):
102 101 count = min(lognormrandom(mu, sigma), len(nodes))
103 102 return rng.sample(nodes, count)
104 103
105 104 def err(seed, graph, bases, seq, output, expected):
106 105 if nerrs[0] == 0:
107 106 print('seed:', hex(seed)[:-1], file=sys.stderr)
108 107 if gerrs[0] == 0:
109 108 print('graph:', graph, file=sys.stderr)
110 109 print('* bases:', bases, file=sys.stderr)
111 110 print('* seq: ', seq, file=sys.stderr)
112 111 print('* output: ', output, file=sys.stderr)
113 112 print('* expected:', expected, file=sys.stderr)
114 113 nerrs[0] += 1
115 114 gerrs[0] += 1
116 115
117 for g in xrange(graphcount):
116 for g in range(graphcount):
118 117 graph = buildgraph(rng)
119 118 ancs = buildancestorsets(graph)
120 119 gerrs = [0]
121 for _ in xrange(testcount):
120 for _ in range(testcount):
122 121 # start from nullrev to include it as a possibility
123 122 graphnodes = range(nullrev, len(graph))
124 123 bases = samplerevs(graphnodes)
125 124
126 125 # fast algorithm
127 126 inc = ancestor.incrementalmissingancestors(graph.__getitem__, bases)
128 127 # reference slow algorithm
129 128 naiveinc = naiveincrementalmissingancestors(ancs, bases)
130 129 seq = []
131 for _ in xrange(inccount):
130 for _ in range(inccount):
132 131 if rng.random() < 0.2:
133 132 newbases = samplerevs(graphnodes)
134 133 seq.append(('addbases', newbases))
135 134 inc.addbases(newbases)
136 135 naiveinc.addbases(newbases)
137 136 if rng.random() < 0.4:
138 137 # larger set so that there are more revs to remove from
139 138 revs = samplerevs(graphnodes, mu=1.5)
140 139 seq.append(('removeancestorsfrom', revs))
141 140 hrevs = set(revs)
142 141 rrevs = set(revs)
143 142 inc.removeancestorsfrom(hrevs)
144 143 naiveinc.removeancestorsfrom(rrevs)
145 144 if hrevs != rrevs:
146 145 err(
147 146 seed,
148 147 graph,
149 148 bases,
150 149 seq,
151 150 sorted(hrevs),
152 151 sorted(rrevs),
153 152 )
154 153 else:
155 154 revs = samplerevs(graphnodes)
156 155 seq.append(('missingancestors', revs))
157 156 h = inc.missingancestors(revs)
158 157 r = naiveinc.missingancestors(revs)
159 158 if h != r:
160 159 err(seed, graph, bases, seq, h, r)
161 160
162 161
163 162 # graph is a dict of child->parent adjacency lists for this graph:
164 163 # o 13
165 164 # |
166 165 # | o 12
167 166 # | |
168 167 # | | o 11
169 168 # | | |\
170 169 # | | | | o 10
171 170 # | | | | |
172 171 # | o---+ | 9
173 172 # | | | | |
174 173 # o | | | | 8
175 174 # / / / /
176 175 # | | o | 7
177 176 # | | | |
178 177 # o---+ | 6
179 178 # / / /
180 179 # | | o 5
181 180 # | |/
182 181 # | o 4
183 182 # | |
184 183 # o | 3
185 184 # | |
186 185 # | o 2
187 186 # |/
188 187 # o 1
189 188 # |
190 189 # o 0
191 190
192 191 graph = {
193 192 0: [-1, -1],
194 193 1: [0, -1],
195 194 2: [1, -1],
196 195 3: [1, -1],
197 196 4: [2, -1],
198 197 5: [4, -1],
199 198 6: [4, -1],
200 199 7: [4, -1],
201 200 8: [-1, -1],
202 201 9: [6, 7],
203 202 10: [5, -1],
204 203 11: [3, 7],
205 204 12: [9, -1],
206 205 13: [8, -1],
207 206 }
208 207
209 208
210 209 def test_missingancestors_explicit():
211 210 """A few explicit cases, easier to check for catching errors in refactors.
212 211
213 212 The bigger graph at the end has been produced by the random generator
214 213 above, and we have some evidence that the other tests don't cover it.
215 214 """
216 215 for i, (bases, revs) in enumerate(
217 216 (
218 ({1, 2, 3, 4, 7}, set(xrange(10))),
217 ({1, 2, 3, 4, 7}, set(range(10))),
219 218 ({10}, set({11, 12, 13, 14})),
220 219 ({7}, set({1, 2, 3, 4, 5})),
221 220 )
222 221 ):
223 222 print("%% removeancestorsfrom(), example %d" % (i + 1))
224 223 missanc = ancestor.incrementalmissingancestors(graph.get, bases)
225 224 missanc.removeancestorsfrom(revs)
226 225 print("remaining (sorted): %s" % sorted(list(revs)))
227 226
228 227 for i, (bases, revs) in enumerate(
229 228 (
230 229 ({10}, {11}),
231 230 ({11}, {10}),
232 231 ({7}, {9, 11}),
233 232 )
234 233 ):
235 234 print("%% missingancestors(), example %d" % (i + 1))
236 235 missanc = ancestor.incrementalmissingancestors(graph.get, bases)
237 236 print("return %s" % missanc.missingancestors(revs))
238 237
239 238 print("% removeancestorsfrom(), bigger graph")
240 239 vecgraph = [
241 240 [-1, -1],
242 241 [0, -1],
243 242 [1, 0],
244 243 [2, 1],
245 244 [3, -1],
246 245 [4, -1],
247 246 [5, 1],
248 247 [2, -1],
249 248 [7, -1],
250 249 [8, -1],
251 250 [9, -1],
252 251 [10, 1],
253 252 [3, -1],
254 253 [12, -1],
255 254 [13, -1],
256 255 [14, -1],
257 256 [4, -1],
258 257 [16, -1],
259 258 [17, -1],
260 259 [18, -1],
261 260 [19, 11],
262 261 [20, -1],
263 262 [21, -1],
264 263 [22, -1],
265 264 [23, -1],
266 265 [2, -1],
267 266 [3, -1],
268 267 [26, 24],
269 268 [27, -1],
270 269 [28, -1],
271 270 [12, -1],
272 271 [1, -1],
273 272 [1, 9],
274 273 [32, -1],
275 274 [33, -1],
276 275 [34, 31],
277 276 [35, -1],
278 277 [36, 26],
279 278 [37, -1],
280 279 [38, -1],
281 280 [39, -1],
282 281 [40, -1],
283 282 [41, -1],
284 283 [42, 26],
285 284 [0, -1],
286 285 [44, -1],
287 286 [45, 4],
288 287 [40, -1],
289 288 [47, -1],
290 289 [36, 0],
291 290 [49, -1],
292 291 [-1, -1],
293 292 [51, -1],
294 293 [52, -1],
295 294 [53, -1],
296 295 [14, -1],
297 296 [55, -1],
298 297 [15, -1],
299 298 [23, -1],
300 299 [58, -1],
301 300 [59, -1],
302 301 [2, -1],
303 302 [61, 59],
304 303 [62, -1],
305 304 [63, -1],
306 305 [-1, -1],
307 306 [65, -1],
308 307 [66, -1],
309 308 [67, -1],
310 309 [68, -1],
311 310 [37, 28],
312 311 [69, 25],
313 312 [71, -1],
314 313 [72, -1],
315 314 [50, 2],
316 315 [74, -1],
317 316 [12, -1],
318 317 [18, -1],
319 318 [77, -1],
320 319 [78, -1],
321 320 [79, -1],
322 321 [43, 33],
323 322 [81, -1],
324 323 [82, -1],
325 324 [83, -1],
326 325 [84, 45],
327 326 [85, -1],
328 327 [86, -1],
329 328 [-1, -1],
330 329 [88, -1],
331 330 [-1, -1],
332 331 [76, 83],
333 332 [44, -1],
334 333 [92, -1],
335 334 [93, -1],
336 335 [9, -1],
337 336 [95, 67],
338 337 [96, -1],
339 338 [97, -1],
340 339 [-1, -1],
341 340 ]
342 341 problem_rev = 28
343 342 problem_base = 70
344 343 # problem_rev is a parent of problem_base, but a faulty implementation
345 344 # could forget to remove it.
346 345 bases = {60, 26, 70, 3, 96, 19, 98, 49, 97, 47, 1, 6}
347 346 if problem_rev not in vecgraph[problem_base] or problem_base not in bases:
348 347 print("Conditions have changed")
349 348 missanc = ancestor.incrementalmissingancestors(vecgraph.__getitem__, bases)
350 349 revs = {4, 12, 41, 28, 68, 38, 1, 30, 56, 44}
351 350 missanc.removeancestorsfrom(revs)
352 351 if 28 in revs:
353 352 print("Failed!")
354 353 else:
355 354 print("Ok")
356 355
357 356
358 357 def genlazyancestors(revs, stoprev=0, inclusive=False):
359 358 print(
360 359 (
361 360 "%% lazy ancestor set for %s, stoprev = %s, inclusive = %s"
362 361 % (revs, stoprev, inclusive)
363 362 )
364 363 )
365 364 return ancestor.lazyancestors(
366 365 graph.get, revs, stoprev=stoprev, inclusive=inclusive
367 366 )
368 367
369 368
370 369 def printlazyancestors(s, l):
371 370 print('membership: %r' % [n for n in l if n in s])
372 371 print('iteration: %r' % list(s))
373 372
374 373
375 374 def test_lazyancestors():
376 375 # Empty revs
377 376 s = genlazyancestors([])
378 377 printlazyancestors(s, [3, 0, -1])
379 378
380 379 # Standard example
381 380 s = genlazyancestors([11, 13])
382 381 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
383 382
384 383 # Standard with ancestry in the initial set (1 is ancestor of 3)
385 384 s = genlazyancestors([1, 3])
386 385 printlazyancestors(s, [1, -1, 0])
387 386
388 387 # Including revs
389 388 s = genlazyancestors([11, 13], inclusive=True)
390 389 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
391 390
392 391 # Test with stoprev
393 392 s = genlazyancestors([11, 13], stoprev=6)
394 393 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
395 394 s = genlazyancestors([11, 13], stoprev=6, inclusive=True)
396 395 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
397 396
398 397 # Test with stoprev >= min(initrevs)
399 398 s = genlazyancestors([11, 13], stoprev=11, inclusive=True)
400 399 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
401 400 s = genlazyancestors([11, 13], stoprev=12, inclusive=True)
402 401 printlazyancestors(s, [11, 13, 7, 9, 8, 3, 6, 4, 1, -1, 0])
403 402
404 403 # Contiguous chains: 5->4, 2->1 (where 1 is in seen set), 1->0
405 404 s = genlazyancestors([10, 1], inclusive=True)
406 405 printlazyancestors(s, [2, 10, 4, 5, -1, 0, 1])
407 406
408 407
409 408 # The C gca algorithm requires a real repo. These are textual descriptions of
410 409 # DAGs that have been known to be problematic, and, optionally, known pairs
411 410 # of revisions and their expected ancestor list.
412 411 dagtests = [
413 412 (b'+2*2*2/*3/2', {}),
414 413 (b'+3*3/*2*2/*4*4/*4/2*4/2*2', {}),
415 414 (b'+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}),
416 415 ]
417 416
418 417
419 418 def test_gca():
420 419 u = uimod.ui.load()
421 420 for i, (dag, tests) in enumerate(dagtests):
422 421 repo = hg.repository(u, b'gca%d' % i, create=1)
423 422 cl = repo.changelog
424 423 if not util.safehasattr(cl.index, 'ancestors'):
425 424 # C version not available
426 425 return
427 426
428 427 debugcommands.debugbuilddag(u, repo, dag)
429 428 # Compare the results of the Python and C versions. This does not
430 429 # include choosing a winner when more than one gca exists -- we make
431 430 # sure both return exactly the same set of gcas.
432 431 # Also compare against expected results, if available.
433 432 for a in cl:
434 433 for b in cl:
435 434 cgcas = sorted(cl.index.ancestors(a, b))
436 435 pygcas = sorted(ancestor.ancestors(cl.parentrevs, a, b))
437 436 expected = None
438 437 if (a, b) in tests:
439 438 expected = tests[(a, b)]
440 439 if cgcas != pygcas or (expected and cgcas != expected):
441 440 print(
442 441 "test_gca: for dag %s, gcas for %d, %d:" % (dag, a, b)
443 442 )
444 443 print(" C returned: %s" % cgcas)
445 444 print(" Python returned: %s" % pygcas)
446 445 if expected:
447 446 print(" expected: %s" % expected)
448 447
449 448
450 449 def main():
451 450 seed = None
452 451 opts, args = getopt.getopt(sys.argv[1:], 's:', ['seed='])
453 452 for o, a in opts:
454 453 if o in ('-s', '--seed'):
455 454 seed = long(a, base=0) # accepts base 10 or 16 strings
456 455
457 456 if seed is None:
458 457 try:
459 458 seed = long(binascii.hexlify(os.urandom(16)), 16)
460 459 except AttributeError:
461 460 seed = long(time.time() * 1000)
462 461
463 462 rng = random.Random(seed)
464 463 test_missingancestors_explicit()
465 464 test_missingancestors(seed, rng)
466 465 test_lazyancestors()
467 466 test_gca()
468 467
469 468
470 469 if __name__ == '__main__':
471 470 main()
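
The tests above represent a DAG as a child -> [p1, p2] adjacency structure, with nullrev (-1) standing in for a missing parent. An illustrative sketch of computing one revision's full ancestor set in that representation (not part of the changeset; buildancestorsets() above computes the same sets incrementally for every node):

def ancestors_of(graph, rev, nullrev=-1):
    """Return rev plus all of its ancestors by walking parent links."""
    seen = set()
    stack = [rev]
    while stack:
        r = stack.pop()
        if r == nullrev or r in seen:
            continue
        seen.add(r)
        stack.extend(graph[r])
    return seen

# With the 14-node example graph above, ancestors_of(graph, 9) yields
# {0, 1, 2, 4, 6, 7, 9}.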
@@ -1,131 +1,127
1 1 import glob
2 2 import os
3 3 import shutil
4 4 import stat
5 5 import tempfile
6 6 import unittest
7 7
8 8 from mercurial import (
9 pycompat,
10 9 util,
11 10 )
12 11
13 12 atomictempfile = util.atomictempfile
14 13
15 if pycompat.ispy3:
16 xrange = range
17
18 14
19 15 class testatomictempfile(unittest.TestCase):
20 16 def setUp(self):
21 17 self._testdir = tempfile.mkdtemp(b'atomictempfiletest')
22 18 self._filename = os.path.join(self._testdir, b'testfilename')
23 19
24 20 def tearDown(self):
25 21 shutil.rmtree(self._testdir, True)
26 22
27 23 def testsimple(self):
28 24 file = atomictempfile(self._filename)
29 25 self.assertFalse(os.path.isfile(self._filename))
30 26 tempfilename = file._tempname
31 27 self.assertTrue(
32 28 tempfilename
33 29 in glob.glob(os.path.join(self._testdir, b'.testfilename-*'))
34 30 )
35 31
36 32 file.write(b'argh\n')
37 33 file.close()
38 34
39 35 self.assertTrue(os.path.isfile(self._filename))
40 36 self.assertTrue(
41 37 tempfilename
42 38 not in glob.glob(os.path.join(self._testdir, b'.testfilename-*'))
43 39 )
44 40
45 41 # discard() removes the temp file without making the write permanent
46 42 def testdiscard(self):
47 43 file = atomictempfile(self._filename)
48 44 (dir, basename) = os.path.split(file._tempname)
49 45
50 46 file.write(b'yo\n')
51 47 file.discard()
52 48
53 49 self.assertFalse(os.path.isfile(self._filename))
54 50 self.assertTrue(basename not in os.listdir(b'.'))
55 51
56 52 # if a programmer screws up and passes bad args to atomictempfile, they
57 53 # get a plain ordinary TypeError, not infinite recursion
58 54 def testoops(self):
59 55 with self.assertRaises(TypeError):
60 56 atomictempfile()
61 57
62 58 # checkambig=True avoids ambiguity of timestamp
63 59 def testcheckambig(self):
64 60 def atomicwrite(checkambig):
65 61 f = atomictempfile(self._filename, checkambig=checkambig)
66 62 f.write(b'FOO')
67 63 f.close()
68 64
69 65 # try some times, because reproduction of ambiguity depends on
70 66 # "filesystem time"
71 for i in xrange(5):
67 for i in range(5):
72 68 atomicwrite(False)
73 69 oldstat = os.stat(self._filename)
74 70 if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
75 71 # subsequent changing never causes ambiguity
76 72 continue
77 73
78 74 repetition = 3
79 75
80 76 # repeat atomic write with checkambig=True, to examine
81 77 # whether st_mtime is advanced multiple times as expected
82 for j in xrange(repetition):
78 for j in range(repetition):
83 79 atomicwrite(True)
84 80 newstat = os.stat(self._filename)
85 81 if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
86 82 # timestamp ambiguity was naturally avoided during repetition
87 83 continue
88 84
89 85 # st_mtime should be advanced "repetition" times, because
90 86 # all atomicwrite() calls occurred at the same time (in seconds)
91 87 oldtime = (oldstat[stat.ST_MTIME] + repetition) & 0x7FFFFFFF
92 88 self.assertTrue(newstat[stat.ST_MTIME] == oldtime)
93 89 # no more examination is needed, if assumption above is true
94 90 break
95 91 else:
96 92 # This platform seems too slow to examine the anti-ambiguity
97 93 # of file timestamps (or the test happened to be executed at
98 94 # a bad time). Exit silently in this case, because running
99 95 # on other, faster platforms can detect problems
100 96 pass
101 97
102 98 def testread(self):
103 99 with open(self._filename, 'wb') as f:
104 100 f.write(b'foobar\n')
105 101 file = atomictempfile(self._filename, mode=b'rb')
106 102 self.assertTrue(file.read(), b'foobar\n')
107 103 file.discard()
108 104
109 105 def testcontextmanagersuccess(self):
110 106 """When the context closes, the file is closed"""
111 107 with atomictempfile(b'foo') as f:
112 108 self.assertFalse(os.path.isfile(b'foo'))
113 109 f.write(b'argh\n')
114 110 self.assertTrue(os.path.isfile(b'foo'))
115 111
116 112 def testcontextmanagerfailure(self):
117 113 """On exception, the file is discarded"""
118 114 try:
119 115 with atomictempfile(b'foo') as f:
120 116 self.assertFalse(os.path.isfile(b'foo'))
121 117 f.write(b'argh\n')
122 118 raise ValueError
123 119 except ValueError:
124 120 pass
125 121 self.assertFalse(os.path.isfile(b'foo'))
126 122
127 123
128 124 if __name__ == '__main__':
129 125 import silenttestrunner
130 126
131 127 silenttestrunner.main(__name__)
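
The test above exercises util.atomictempfile, which is built around the common write-to-a-temporary-file-then-rename idiom so that readers only ever see the old or the new file content. A standard-library-only sketch of that idiom (illustrative; Mercurial's implementation additionally handles the checkambig timestamp behaviour tested above):

import os
import tempfile

def atomic_write(path, data):
    """Write data to path without readers ever observing a partial file."""
    dirname = os.path.dirname(path) or '.'
    fd, tmp = tempfile.mkstemp(prefix='.atomic-', dir=dirname)
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        os.replace(tmp, path)  # atomic replacement within one filesystem
    except Exception:
        os.unlink(tmp)
        raise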
@@ -1,224 +1,221
1 1 import os
2 2 import tempfile
3 3
4 4 from mercurial import (
5 5 pycompat,
6 6 util,
7 7 )
8 8
9 9 from hgext.fastannotate import error, revmap
10 10
11 if pycompat.ispy3:
12 xrange = range
13
14 11
15 12 def genhsh(i):
16 13 return pycompat.bytechr(i) + b'\0' * 19
17 14
18 15
19 16 def gettemppath():
20 17 fd, path = tempfile.mkstemp()
21 18 os.close(fd)
22 19 os.unlink(path)
23 20 return path
24 21
25 22
26 23 def ensure(condition):
27 24 if not condition:
28 25 raise RuntimeError('Unexpected')
29 26
30 27
31 28 def testbasicreadwrite():
32 29 path = gettemppath()
33 30
34 31 rm = revmap.revmap(path)
35 32 ensure(rm.maxrev == 0)
36 for i in xrange(5):
33 for i in range(5):
37 34 ensure(rm.rev2hsh(i) is None)
38 35 ensure(rm.hsh2rev(b'\0' * 20) is None)
39 36
40 37 paths = [
41 38 b'',
42 39 b'a',
43 40 None,
44 41 b'b',
45 42 b'b',
46 43 b'c',
47 44 b'c',
48 45 None,
49 46 b'a',
50 47 b'b',
51 48 b'a',
52 49 b'a',
53 50 ]
54 for i in xrange(1, 5):
51 for i in range(1, 5):
55 52 ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
56 53
57 54 ensure(rm.maxrev == 4)
58 for i in xrange(1, 5):
55 for i in range(1, 5):
59 56 ensure(rm.hsh2rev(genhsh(i)) == i)
60 57 ensure(rm.rev2hsh(i) == genhsh(i))
61 58
62 59 # re-load and verify
63 60 rm.flush()
64 61 rm = revmap.revmap(path)
65 62 ensure(rm.maxrev == 4)
66 for i in xrange(1, 5):
63 for i in range(1, 5):
67 64 ensure(rm.hsh2rev(genhsh(i)) == i)
68 65 ensure(rm.rev2hsh(i) == genhsh(i))
69 66 ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
70 67
71 68 # append without calling flush() explicitly (flush=True does it)
72 for i in xrange(5, 12):
69 for i in range(5, 12):
73 70 ensure(
74 71 rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i], flush=True)
75 72 == i
76 73 )
77 74
78 75 # re-load and verify
79 76 rm = revmap.revmap(path)
80 77 ensure(rm.maxrev == 11)
81 for i in xrange(1, 12):
78 for i in range(1, 12):
82 79 ensure(rm.hsh2rev(genhsh(i)) == i)
83 80 ensure(rm.rev2hsh(i) == genhsh(i))
84 81 ensure(rm.rev2path(i) == paths[i] or paths[i - 1])
85 82 ensure(bool(rm.rev2flag(i) & revmap.sidebranchflag) == bool(i & 1))
86 83
87 84 os.unlink(path)
88 85
89 86 # missing keys
90 87 ensure(rm.rev2hsh(12) is None)
91 88 ensure(rm.rev2hsh(0) is None)
92 89 ensure(rm.rev2hsh(-1) is None)
93 90 ensure(rm.rev2flag(12) is None)
94 91 ensure(rm.rev2path(12) is None)
95 92 ensure(rm.hsh2rev(b'\1' * 20) is None)
96 93
97 94 # ill-formed hash (not 20 bytes)
98 95 try:
99 96 rm.append(b'\0')
100 97 ensure(False)
101 98 except Exception:
102 99 pass
103 100
104 101
105 102 def testcorruptformat():
106 103 path = gettemppath()
107 104
108 105 # incorrect header
109 106 with open(path, 'wb') as f:
110 107 f.write(b'NOT A VALID HEADER')
111 108 try:
112 109 revmap.revmap(path)
113 110 ensure(False)
114 111 except error.CorruptedFileError:
115 112 pass
116 113
117 114 # rewrite the file
118 115 os.unlink(path)
119 116 rm = revmap.revmap(path)
120 117 rm.append(genhsh(0), flush=True)
121 118
122 119 rm = revmap.revmap(path)
123 120 ensure(rm.maxrev == 1)
124 121
125 122 # corrupt the file by appending a byte
126 123 size = os.stat(path).st_size
127 124 with open(path, 'ab') as f:
128 125 f.write(b'\xff')
129 126 try:
130 127 revmap.revmap(path)
131 128 ensure(False)
132 129 except error.CorruptedFileError:
133 130 pass
134 131
135 132 # corrupt the file by removing the last byte
136 133 ensure(size > 0)
137 134 with open(path, 'wb') as f:
138 135 f.truncate(size - 1)
139 136 try:
140 137 revmap.revmap(path)
141 138 ensure(False)
142 139 except error.CorruptedFileError:
143 140 pass
144 141
145 142 os.unlink(path)
146 143
147 144
148 145 def testcopyfrom():
149 146 path = gettemppath()
150 147 rm = revmap.revmap(path)
151 for i in xrange(1, 10):
148 for i in range(1, 10):
152 149 ensure(
153 150 rm.append(genhsh(i), sidebranch=(i & 1), path=(b'%d' % (i // 3)))
154 151 == i
155 152 )
156 153 rm.flush()
157 154
158 155 # copy rm to rm2
159 156 rm2 = revmap.revmap()
160 157 rm2.copyfrom(rm)
161 158 path2 = gettemppath()
162 159 rm2.path = path2
163 160 rm2.flush()
164 161
165 162 # two files should be the same
166 163 ensure(len({util.readfile(p) for p in [path, path2]}) == 1)
167 164
168 165 os.unlink(path)
169 166 os.unlink(path2)
170 167
171 168
172 169 class fakefctx:
173 170 def __init__(self, node, path=None):
174 171 self._node = node
175 172 self._path = path
176 173
177 174 def node(self):
178 175 return self._node
179 176
180 177 def path(self):
181 178 return self._path
182 179
183 180
184 181 def testcontains():
185 182 path = gettemppath()
186 183
187 184 rm = revmap.revmap(path)
188 for i in xrange(1, 5):
185 for i in range(1, 5):
189 186 ensure(rm.append(genhsh(i), sidebranch=(i & 1)) == i)
190 187
191 for i in xrange(1, 5):
188 for i in range(1, 5):
192 189 ensure(((genhsh(i), None) in rm) == ((i & 1) == 0))
193 190 ensure((fakefctx(genhsh(i)) in rm) == ((i & 1) == 0))
194 for i in xrange(5, 10):
191 for i in range(5, 10):
195 192 ensure(fakefctx(genhsh(i)) not in rm)
196 193 ensure((genhsh(i), None) not in rm)
197 194
198 195 # "contains" checks paths
199 196 rm = revmap.revmap()
200 for i in xrange(1, 5):
197 for i in range(1, 5):
201 198 ensure(rm.append(genhsh(i), path=(b'%d' % (i // 2))) == i)
202 for i in xrange(1, 5):
199 for i in range(1, 5):
203 200 ensure(fakefctx(genhsh(i), path=(b'%d' % (i // 2))) in rm)
204 201 ensure(fakefctx(genhsh(i), path=b'a') not in rm)
205 202
206 203
207 204 def testlastnode():
208 205 path = gettemppath()
209 206 ensure(revmap.getlastnode(path) is None)
210 207 rm = revmap.revmap(path)
211 208 ensure(revmap.getlastnode(path) is None)
212 for i in xrange(1, 10):
209 for i in range(1, 10):
213 210 hsh = genhsh(i)
214 211 rm.append(hsh, path=(b'%d' % (i // 2)), flush=True)
215 212 ensure(revmap.getlastnode(path) == hsh)
216 213 rm2 = revmap.revmap(path)
217 214 ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
218 215
219 216
220 217 testbasicreadwrite()
221 218 testcorruptformat()
222 219 testcopyfrom()
223 220 testcontains()
224 221 testlastnode()
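The testcorruptformat() checks above expect revmap.revmap() to raise error.CorruptedFileError whenever the on-disk file has a bad header, one extra trailing byte, or one missing byte. A minimal sketch of that style of integrity check, assuming a magic header plus fixed-width records purely for illustration (not the revmap file's actual layout):

    import os

    HEADER = b'REVMAP\0'   # hypothetical magic bytes
    RECORDSIZE = 25        # hypothetical fixed record width

    def checkintegrity(path):
        size = os.path.getsize(path)
        with open(path, 'rb') as f:
            if f.read(len(HEADER)) != HEADER:
                raise ValueError('corrupted: bad header')
        if (size - len(HEADER)) % RECORDSIZE != 0:
            # appending or dropping a single byte breaks the alignment,
            # which is exactly what the test provokes
            raise ValueError('corrupted: partial record')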
@@ -1,285 +1,282
1 1 import os
2 2 import stat
3 3 import subprocess
4 4 import sys
5 5
6 6 if subprocess.call(
7 7 [sys.executable, '%s/hghave' % os.environ['TESTDIR'], 'cacheable']
8 8 ):
9 9 sys.exit(80)
10 10
11 11 print_ = print
12 12
13 13
14 14 def print(*args, **kwargs):
15 15 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
16 16
17 17 We could also just write directly to sys.stdout.buffer the way the
18 18 ui object will, but this was easier for porting the test.
19 19 """
20 20 print_(*args, **kwargs)
21 21 sys.stdout.flush()
22 22
23 23
24 24 from mercurial import (
25 25 extensions,
26 26 hg,
27 27 localrepo,
28 28 pycompat,
29 29 ui as uimod,
30 30 util,
31 31 vfs as vfsmod,
32 32 )
33 33
34 if pycompat.ispy3:
35 xrange = range
36
37 34
38 35 class fakerepo:
39 36 def __init__(self):
40 37 self._filecache = {}
41 38
42 39 class fakevfs:
43 40 def join(self, p):
44 41 return p
45 42
46 43 vfs = fakevfs()
47 44
48 45 def unfiltered(self):
49 46 return self
50 47
51 48 def sjoin(self, p):
52 49 return p
53 50
54 51 @localrepo.repofilecache('x', 'y')
55 52 def cached(self):
56 53 print('creating')
57 54 return 'string from function'
58 55
59 56 def invalidate(self):
60 57 for k in self._filecache:
61 58 try:
62 59 delattr(self, pycompat.sysstr(k))
63 60 except AttributeError:
64 61 pass
65 62
66 63
67 64 def basic(repo):
68 65 print("* neither file exists")
69 66 # calls function
70 67 repo.cached
71 68
72 69 repo.invalidate()
73 70 print("* neither file still exists")
74 71 # uses cache
75 72 repo.cached
76 73
77 74 # create empty file
78 75 f = open('x', 'w')
79 76 f.close()
80 77 repo.invalidate()
81 78 print("* empty file x created")
82 79 # should recreate the object
83 80 repo.cached
84 81
85 82 f = open('x', 'w')
86 83 f.write('a')
87 84 f.close()
88 85 repo.invalidate()
89 86 print("* file x changed size")
90 87 # should recreate the object
91 88 repo.cached
92 89
93 90 repo.invalidate()
94 91 print("* nothing changed with either file")
95 92 # stats file again, reuses object
96 93 repo.cached
97 94
98 95 # atomic replace file, size doesn't change
99 96 # hopefully st_mtime doesn't change as well so this doesn't use the cache
100 97 # because of inode change
101 98 f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
102 99 f.write(b'b')
103 100 f.close()
104 101
105 102 repo.invalidate()
106 103 print("* file x changed inode")
107 104 repo.cached
108 105
109 106 # create empty file y
110 107 f = open('y', 'w')
111 108 f.close()
112 109 repo.invalidate()
113 110 print("* empty file y created")
114 111 # should recreate the object
115 112 repo.cached
116 113
117 114 f = open('y', 'w')
118 115 f.write('A')
119 116 f.close()
120 117 repo.invalidate()
121 118 print("* file y changed size")
122 119 # should recreate the object
123 120 repo.cached
124 121
125 122 f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
126 123 f.write(b'B')
127 124 f.close()
128 125
129 126 repo.invalidate()
130 127 print("* file y changed inode")
131 128 repo.cached
132 129
133 130 f = vfsmod.vfs(b'.')(b'x', b'w', atomictemp=True)
134 131 f.write(b'c')
135 132 f.close()
136 133 f = vfsmod.vfs(b'.')(b'y', b'w', atomictemp=True)
137 134 f.write(b'C')
138 135 f.close()
139 136
140 137 repo.invalidate()
141 138 print("* both files changed inode")
142 139 repo.cached
143 140
144 141
145 142 def fakeuncacheable():
146 143 def wrapcacheable(orig, *args, **kwargs):
147 144 return False
148 145
149 146 def wrapinit(orig, *args, **kwargs):
150 147 pass
151 148
152 149 originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
153 150 origcacheable = extensions.wrapfunction(
154 151 util.cachestat, 'cacheable', wrapcacheable
155 152 )
156 153
157 154 for fn in ['x', 'y']:
158 155 try:
159 156 os.remove(fn)
160 157 except OSError:
161 158 pass
162 159
163 160 basic(fakerepo())
164 161
165 162 util.cachestat.cacheable = origcacheable
166 163 util.cachestat.__init__ = originit
167 164
168 165
169 166 def test_filecache_synced():
170 167 # test old behavior that caused filecached properties to go out of sync
171 168 os.system('hg init && echo a >> a && hg ci -qAm.')
172 169 repo = hg.repository(uimod.ui.load())
173 170 # first rollback clears the filecache, but changelog still stays in __dict__
174 171 repo.rollback()
175 172 repo.commit(b'.')
176 173 # second rollback comes along and touches the changelog externally
177 174 # (file is moved)
178 175 repo.rollback()
179 176 # but since changelog isn't under the filecache control anymore, we don't
180 177 # see that it changed, and return the old changelog without reconstructing
181 178 # it
182 179 repo.commit(b'.')
183 180
184 181
185 182 def setbeforeget(repo):
186 183 os.remove('x')
187 184 os.remove('y')
188 185 repo.__class__.cached.set(repo, 'string set externally')
189 186 repo.invalidate()
190 187 print("* neither file exists")
191 188 print(repo.cached)
192 189 repo.invalidate()
193 190 f = open('x', 'w')
194 191 f.write('a')
195 192 f.close()
196 193 print("* file x created")
197 194 print(repo.cached)
198 195
199 196 repo.__class__.cached.set(repo, 'string 2 set externally')
200 197 repo.invalidate()
201 198 print("* string set externally again")
202 199 print(repo.cached)
203 200
204 201 repo.invalidate()
205 202 f = open('y', 'w')
206 203 f.write('b')
207 204 f.close()
208 205 print("* file y created")
209 206 print(repo.cached)
210 207
211 208
212 209 def antiambiguity():
213 210 filename = 'ambigcheck'
214 211
215 212 # try some times, because reproduction of ambiguity depends on
216 213 # "filesystem time"
217 for i in xrange(5):
214 for i in range(5):
218 215 fp = open(filename, 'w')
219 216 fp.write('FOO')
220 217 fp.close()
221 218
222 219 oldstat = os.stat(filename)
223 220 if oldstat[stat.ST_CTIME] != oldstat[stat.ST_MTIME]:
224 221 # subsequent changing never causes ambiguity
225 222 continue
226 223
227 224 repetition = 3
228 225
229 226 # repeat changing via checkambigatclosing, to examine whether
230 227 # st_mtime is advanced multiple times as expected
231 for i in xrange(repetition):
228 for i in range(repetition):
232 229 # explicit closing
233 230 fp = vfsmod.checkambigatclosing(open(filename, 'a'))
234 231 fp.write('FOO')
235 232 fp.close()
236 233
237 234 # implicit closing by "with" statement
238 235 with vfsmod.checkambigatclosing(open(filename, 'a')) as fp:
239 236 fp.write('BAR')
240 237
241 238 newstat = os.stat(filename)
242 239 if oldstat[stat.ST_CTIME] != newstat[stat.ST_CTIME]:
243 240 # timestamp ambiguity was naturally avoided while repetition
244 241 continue
245 242
246 243 # st_mtime should be advanced "repetition * 2" times, because
247 244 # all changes occurred at same time (in sec)
248 245 expected = (oldstat[stat.ST_MTIME] + repetition * 2) & 0x7FFFFFFF
249 246 if newstat[stat.ST_MTIME] != expected:
250 247 print(
251 248 "'newstat[stat.ST_MTIME] %s is not %s (as %s + %s * 2)"
252 249 % (
253 250 newstat[stat.ST_MTIME],
254 251 expected,
255 252 oldstat[stat.ST_MTIME],
256 253 repetition,
257 254 )
258 255 )
259 256
260 257 # no more examination is needed regardless of result
261 258 break
262 259 else:
263 260 # This platform seems too slow to examine anti-ambiguity
264 261 # of file timestamp (or test happened to be executed at
265 262 # bad timing). Exit silently in this case, because running
266 263 # on other faster platforms can detect problems
267 264 pass
268 265
269 266
270 267 print('basic:')
271 268 print()
272 269 basic(fakerepo())
273 270 print()
274 271 print('fakeuncacheable:')
275 272 print()
276 273 fakeuncacheable()
277 274 test_filecache_synced()
278 275 print()
279 276 print('setbeforeget:')
280 277 print()
281 278 setbeforeget(fakerepo())
282 279 print()
283 280 print('antiambiguity:')
284 281 print()
285 282 antiambiguity()
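The antiambiguity() helper above relies on checkambigatclosing advancing st_mtime by one second for every close that would otherwise leave the timestamp ambiguous; with three repetitions of two closes each, the final mtime should land exactly repetition * 2 seconds ahead, masked to 31 bits. A tiny worked example of that arithmetic, assuming all writes happen within the same wall-clock second:

    oldmtime = 1700000000      # hypothetical st_mtime, in whole seconds
    repetition = 3
    # two ambiguity-avoiding closes per repetition, each bumping mtime by 1s
    expected = (oldmtime + repetition * 2) & 0x7FFFFFFF
    assert expected == 1700000006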
@@ -1,462 +1,460
1 1 import binascii
2 2 import itertools
3 3 import silenttestrunner
4 4 import unittest
5 5 import zlib
6 6
7 7 from mercurial.node import sha1nodeconstants
8 8
9 9 from mercurial import (
10 10 manifest as manifestmod,
11 11 match as matchmod,
12 12 util,
13 13 )
14 14
15 15 EMTPY_MANIFEST = b''
16 16
17 17 HASH_1 = b'1' * 40
18 18 BIN_HASH_1 = binascii.unhexlify(HASH_1)
19 19 HASH_2 = b'f' * 40
20 20 BIN_HASH_2 = binascii.unhexlify(HASH_2)
21 21 HASH_3 = b'1234567890abcdef0987654321deadbeef0fcafe'
22 22 BIN_HASH_3 = binascii.unhexlify(HASH_3)
23 23 A_SHORT_MANIFEST = (
24 24 b'bar/baz/qux.py\0%(hash2)s%(flag2)s\n' b'foo\0%(hash1)s%(flag1)s\n'
25 25 ) % {
26 26 b'hash1': HASH_1,
27 27 b'flag1': b'',
28 28 b'hash2': HASH_2,
29 29 b'flag2': b'l',
30 30 }
31 31
32 32 A_DEEPER_MANIFEST = (
33 33 b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
34 34 b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
35 35 b'a/b/c/foo.py\0%(hash3)s%(flag1)s\n'
36 36 b'a/b/c/foo.txt\0%(hash2)s%(flag2)s\n'
37 37 b'a/b/d/baz.py\0%(hash3)s%(flag1)s\n'
38 38 b'a/b/d/qux.py\0%(hash1)s%(flag2)s\n'
39 39 b'a/b/d/ten.txt\0%(hash3)s%(flag2)s\n'
40 40 b'a/b/dog.py\0%(hash3)s%(flag1)s\n'
41 41 b'a/b/fish.py\0%(hash2)s%(flag1)s\n'
42 42 b'a/c/london.py\0%(hash3)s%(flag2)s\n'
43 43 b'a/c/paper.txt\0%(hash2)s%(flag2)s\n'
44 44 b'a/c/paris.py\0%(hash2)s%(flag1)s\n'
45 45 b'a/d/apple.py\0%(hash3)s%(flag1)s\n'
46 46 b'a/d/pizza.py\0%(hash3)s%(flag2)s\n'
47 47 b'a/green.py\0%(hash1)s%(flag2)s\n'
48 48 b'a/purple.py\0%(hash2)s%(flag1)s\n'
49 49 b'app.py\0%(hash3)s%(flag1)s\n'
50 50 b'readme.txt\0%(hash2)s%(flag1)s\n'
51 51 ) % {
52 52 b'hash1': HASH_1,
53 53 b'flag1': b'',
54 54 b'hash2': HASH_2,
55 55 b'flag2': b'l',
56 56 b'hash3': HASH_3,
57 57 }
58 58
59 59 HUGE_MANIFEST_ENTRIES = 200001
60 60
61 61 izip = getattr(itertools, 'izip', zip)
62 if 'xrange' not in globals():
63 xrange = range
64 62
65 63 A_HUGE_MANIFEST = b''.join(
66 64 sorted(
67 65 b'file%d\0%s%s\n' % (i, h, f)
68 66 for i, h, f in izip(
69 xrange(200001),
67 range(200001),
70 68 itertools.cycle((HASH_1, HASH_2)),
71 69 itertools.cycle((b'', b'x', b'l')),
72 70 )
73 71 )
74 72 )
75 73
76 74
77 75 class basemanifesttests:
78 76 def parsemanifest(self, text):
79 77 raise NotImplementedError('parsemanifest not implemented by test case')
80 78
81 79 def testEmptyManifest(self):
82 80 m = self.parsemanifest(20, EMTPY_MANIFEST)
83 81 self.assertEqual(0, len(m))
84 82 self.assertEqual([], list(m))
85 83
86 84 def testManifest(self):
87 85 m = self.parsemanifest(20, A_SHORT_MANIFEST)
88 86 self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
89 87 self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
90 88 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
91 89 self.assertEqual(BIN_HASH_1, m[b'foo'])
92 90 self.assertEqual(b'', m.flags(b'foo'))
93 91 with self.assertRaises(KeyError):
94 92 m[b'wat']
95 93
96 94 def testSetItem(self):
97 95 want = BIN_HASH_1
98 96
99 97 m = self.parsemanifest(20, EMTPY_MANIFEST)
100 98 m[b'a'] = want
101 99 self.assertIn(b'a', m)
102 100 self.assertEqual(want, m[b'a'])
103 101 self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
104 102
105 103 m = self.parsemanifest(20, A_SHORT_MANIFEST)
106 104 m[b'a'] = want
107 105 self.assertEqual(want, m[b'a'])
108 106 self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
109 107
110 108 def testSetFlag(self):
111 109 want = b'x'
112 110
113 111 m = self.parsemanifest(20, EMTPY_MANIFEST)
114 112 # first add a file; a file-less flag makes no sense
115 113 m[b'a'] = BIN_HASH_1
116 114 m.setflag(b'a', want)
117 115 self.assertEqual(want, m.flags(b'a'))
118 116 self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
119 117
120 118 m = self.parsemanifest(20, A_SHORT_MANIFEST)
121 119 # first add a file; a file-less flag makes no sense
122 120 m[b'a'] = BIN_HASH_1
123 121 m.setflag(b'a', want)
124 122 self.assertEqual(want, m.flags(b'a'))
125 123 self.assertEqual(
126 124 b'a\0' + HASH_1 + want + b'\n' + A_SHORT_MANIFEST, m.text()
127 125 )
128 126
129 127 def testCopy(self):
130 128 m = self.parsemanifest(20, A_SHORT_MANIFEST)
131 129 m[b'a'] = BIN_HASH_1
132 130 m2 = m.copy()
133 131 del m
134 132 del m2 # make sure we don't double free() anything
135 133
136 134 def testCompaction(self):
137 135 unhex = binascii.unhexlify
138 136 h1, h2 = unhex(HASH_1), unhex(HASH_2)
139 137 m = self.parsemanifest(20, A_SHORT_MANIFEST)
140 138 m[b'alpha'] = h1
141 139 m[b'beta'] = h2
142 140 del m[b'foo']
143 141 want = b'alpha\0%s\nbar/baz/qux.py\0%sl\nbeta\0%s\n' % (
144 142 HASH_1,
145 143 HASH_2,
146 144 HASH_2,
147 145 )
148 146 self.assertEqual(want, m.text())
149 147 self.assertEqual(3, len(m))
150 148 self.assertEqual([b'alpha', b'bar/baz/qux.py', b'beta'], list(m))
151 149 self.assertEqual(h1, m[b'alpha'])
152 150 self.assertEqual(h2, m[b'bar/baz/qux.py'])
153 151 self.assertEqual(h2, m[b'beta'])
154 152 self.assertEqual(b'', m.flags(b'alpha'))
155 153 self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
156 154 self.assertEqual(b'', m.flags(b'beta'))
157 155 with self.assertRaises(KeyError):
158 156 m[b'foo']
159 157
160 158 def testMatchException(self):
161 159 m = self.parsemanifest(20, A_SHORT_MANIFEST)
162 160 match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
163 161
164 162 def filt(path):
165 163 if path == b'foo':
166 164 assert False
167 165 return True
168 166
169 167 match.matchfn = filt
170 168 with self.assertRaises(AssertionError):
171 169 m._matches(match)
172 170
173 171 def testRemoveItem(self):
174 172 m = self.parsemanifest(20, A_SHORT_MANIFEST)
175 173 del m[b'foo']
176 174 with self.assertRaises(KeyError):
177 175 m[b'foo']
178 176 self.assertEqual(1, len(m))
179 177 self.assertEqual(1, len(list(m)))
180 178 # now restore and make sure everything works right
181 179 m[b'foo'] = b'a' * 20
182 180 self.assertEqual(2, len(m))
183 181 self.assertEqual(2, len(list(m)))
184 182
185 183 def testManifestDiff(self):
186 184 MISSING = (None, b'')
187 185 addl = b'z-only-in-left\0' + HASH_1 + b'\n'
188 186 addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
189 187 left = self.parsemanifest(
190 188 20, A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
191 189 )
192 190 right = self.parsemanifest(20, A_SHORT_MANIFEST + addr)
193 191 want = {
194 192 b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
195 193 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
196 194 b'z-only-in-right': (MISSING, (BIN_HASH_2, b'x')),
197 195 }
198 196 self.assertEqual(want, left.diff(right))
199 197
200 198 want = {
201 199 b'bar/baz/qux.py': (MISSING, (BIN_HASH_2, b'l')),
202 200 b'foo': (MISSING, (BIN_HASH_3, b'x')),
203 201 b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
204 202 }
205 203 self.assertEqual(
206 204 want, self.parsemanifest(20, EMTPY_MANIFEST).diff(left)
207 205 )
208 206
209 207 want = {
210 208 b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
211 209 b'foo': ((BIN_HASH_3, b'x'), MISSING),
212 210 b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
213 211 }
214 212 self.assertEqual(
215 213 want, left.diff(self.parsemanifest(20, EMTPY_MANIFEST))
216 214 )
217 215 copy = right.copy()
218 216 del copy[b'z-only-in-right']
219 217 del right[b'foo']
220 218 want = {
221 219 b'foo': (MISSING, (BIN_HASH_1, b'')),
222 220 b'z-only-in-right': ((BIN_HASH_2, b'x'), MISSING),
223 221 }
224 222 self.assertEqual(want, right.diff(copy))
225 223
226 224 short = self.parsemanifest(20, A_SHORT_MANIFEST)
227 225 pruned = short.copy()
228 226 del pruned[b'foo']
229 227 want = {
230 228 b'foo': ((BIN_HASH_1, b''), MISSING),
231 229 }
232 230 self.assertEqual(want, short.diff(pruned))
233 231 want = {
234 232 b'foo': (MISSING, (BIN_HASH_1, b'')),
235 233 }
236 234 self.assertEqual(want, pruned.diff(short))
237 235 want = {
238 236 b'bar/baz/qux.py': None,
239 237 b'foo': (MISSING, (BIN_HASH_1, b'')),
240 238 }
241 239 self.assertEqual(want, pruned.diff(short, clean=True))
242 240
243 241 def testReversedLines(self):
244 242 backwards = b''.join(
245 243 l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
246 244 )
247 245 try:
248 246 self.parsemanifest(20, backwards)
249 247 self.fail('Should have raised ValueError')
250 248 except ValueError as v:
251 249 self.assertIn('Manifest lines not in sorted order.', str(v))
252 250
253 251 def testNoTerminalNewline(self):
254 252 try:
255 253 self.parsemanifest(20, A_SHORT_MANIFEST + b'wat')
256 254 self.fail('Should have raised ValueError')
257 255 except ValueError as v:
258 256 self.assertIn('Manifest did not end in a newline.', str(v))
259 257
260 258 def testNoNewLineAtAll(self):
261 259 try:
262 260 self.parsemanifest(20, b'wat')
263 261 self.fail('Should have raised ValueError')
264 262 except ValueError as v:
265 263 self.assertIn('Manifest did not end in a newline.', str(v))
266 264
267 265 def testHugeManifest(self):
268 266 m = self.parsemanifest(20, A_HUGE_MANIFEST)
269 267 self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
270 268 self.assertEqual(len(m), len(list(m)))
271 269
272 270 def testMatchesMetadata(self):
273 271 """Tests matches() for a few specific files to make sure that both
274 272 the set of files as well as their flags and nodeids are correct in
275 273 the resulting manifest."""
276 274 m = self.parsemanifest(20, A_HUGE_MANIFEST)
277 275
278 276 match = matchmod.exact([b'file1', b'file200', b'file300'])
279 277 m2 = m._matches(match)
280 278
281 279 w = (b'file1\0%sx\n' b'file200\0%sl\n' b'file300\0%s\n') % (
282 280 HASH_2,
283 281 HASH_1,
284 282 HASH_1,
285 283 )
286 284 self.assertEqual(w, m2.text())
287 285
288 286 def testMatchesNonexistentFile(self):
289 287 """Tests matches() for a small set of specific files, including one
290 288 nonexistent file to make sure it only matches against existing files.
291 289 """
292 290 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
293 291
294 292 match = matchmod.exact(
295 293 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
296 294 )
297 295 m2 = m._matches(match)
298 296
299 297 self.assertEqual(
300 298 [b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt'], m2.keys()
301 299 )
302 300
303 301 def testMatchesNonexistentDirectory(self):
304 302 """Tests matches() for a relpath match on a directory that doesn't
305 303 actually exist."""
306 304 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
307 305
308 306 match = matchmod.match(
309 307 util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
310 308 )
311 309 m2 = m._matches(match)
312 310
313 311 self.assertEqual([], m2.keys())
314 312
315 313 def testMatchesExactLarge(self):
316 314 """Tests matches() for files matching a large list of exact files."""
317 315 m = self.parsemanifest(20, A_HUGE_MANIFEST)
318 316
319 317 flist = m.keys()[80:300]
320 318 match = matchmod.exact(flist)
321 319 m2 = m._matches(match)
322 320
323 321 self.assertEqual(flist, m2.keys())
324 322
325 323 def testMatchesFull(self):
326 324 '''Tests matches() for what should be a full match.'''
327 325 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
328 326
329 327 match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
330 328 m2 = m._matches(match)
331 329
332 330 self.assertEqual(m.keys(), m2.keys())
333 331
334 332 def testMatchesDirectory(self):
335 333 """Tests matches() on a relpath match on a directory, which should
336 334 match against all files within said directory."""
337 335 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
338 336
339 337 match = matchmod.match(
340 338 util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
341 339 )
342 340 m2 = m._matches(match)
343 341
344 342 self.assertEqual(
345 343 [
346 344 b'a/b/c/bar.py',
347 345 b'a/b/c/bar.txt',
348 346 b'a/b/c/foo.py',
349 347 b'a/b/c/foo.txt',
350 348 b'a/b/d/baz.py',
351 349 b'a/b/d/qux.py',
352 350 b'a/b/d/ten.txt',
353 351 b'a/b/dog.py',
354 352 b'a/b/fish.py',
355 353 ],
356 354 m2.keys(),
357 355 )
358 356
359 357 def testMatchesExactPath(self):
360 358 """Tests matches() on an exact match on a directory, which should
361 359 result in an empty manifest because you can't perform an exact match
362 360 against a directory."""
363 361 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
364 362
365 363 match = matchmod.exact([b'a/b'])
366 364 m2 = m._matches(match)
367 365
368 366 self.assertEqual([], m2.keys())
369 367
370 368 def testMatchesCwd(self):
371 369 """Tests matches() on a relpath match with the current directory ('.')
372 370 when not in the root directory."""
373 371 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
374 372
375 373 match = matchmod.match(
376 374 util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
377 375 )
378 376 m2 = m._matches(match)
379 377
380 378 self.assertEqual(
381 379 [
382 380 b'a/b/c/bar.py',
383 381 b'a/b/c/bar.txt',
384 382 b'a/b/c/foo.py',
385 383 b'a/b/c/foo.txt',
386 384 b'a/b/d/baz.py',
387 385 b'a/b/d/qux.py',
388 386 b'a/b/d/ten.txt',
389 387 b'a/b/dog.py',
390 388 b'a/b/fish.py',
391 389 ],
392 390 m2.keys(),
393 391 )
394 392
395 393 def testMatchesWithPattern(self):
396 394 """Tests matches() for files matching a pattern that reside
397 395 deeper than the specified directory."""
398 396 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
399 397
400 398 match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
401 399 m2 = m._matches(match)
402 400
403 401 self.assertEqual(
404 402 [b'a/b/c/bar.txt', b'a/b/c/foo.txt', b'a/b/d/ten.txt'], m2.keys()
405 403 )
406 404
407 405
408 406 class testmanifestdict(unittest.TestCase, basemanifesttests):
409 407 def parsemanifest(self, nodelen, text):
410 408 return manifestmod.manifestdict(nodelen, text)
411 409
412 410 def testManifestLongHashes(self):
413 411 m = self.parsemanifest(32, b'a\0' + b'f' * 64 + b'\n')
414 412 self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
415 413
416 414 def testObviouslyBogusManifest(self):
417 415 # This is a 163k manifest that came from oss-fuzz. It was a
418 416 # timeout there, but when run normally it doesn't seem to
419 417 # present any particular slowness.
420 418 data = zlib.decompress(
421 419 b'x\x9c\xed\xce;\n\x83\x00\x10\x04\xd0\x8deNa\x93~\xf1\x03\xc9q\xf4'
422 420 b'\x14\xeaU\xbdB\xda\xd4\xe6Cj\xc1FA\xde+\x86\xe9f\xa2\xfci\xbb\xfb'
423 421 b'\xa3\xef\xea\xba\xca\x7fk\x86q\x9a\xc6\xc8\xcc&\xb3\xcf\xf8\xb8|#'
424 422 b'\x8a9\x00\xd8\xe6v\xf4\x01N\xe1\n\x00\x00\x00\x00\x00\x00\x00\x00'
425 423 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
426 424 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
427 425 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
428 426 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
429 427 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
430 428 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
431 429 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
432 430 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
433 431 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
434 432 b'\x00\x00\xc0\x8aey\x1d}\x01\xd8\xe0\xb9\xf3\xde\x1b\xcf\x17'
435 433 b'\xac\xbe'
436 434 )
437 435 with self.assertRaises(ValueError):
438 436 self.parsemanifest(20, data)
439 437
440 438
441 439 class testtreemanifest(unittest.TestCase, basemanifesttests):
442 440 def parsemanifest(self, nodelen, text):
443 441 return manifestmod.treemanifest(sha1nodeconstants, b'', text)
444 442
445 443 def testWalkSubtrees(self):
446 444 m = self.parsemanifest(20, A_DEEPER_MANIFEST)
447 445
448 446 dirs = [s._dir for s in m.walksubtrees()]
449 447 self.assertEqual(
450 448 sorted(
451 449 [b'', b'a/', b'a/c/', b'a/d/', b'a/b/', b'a/b/c/', b'a/b/d/']
452 450 ),
453 451 sorted(dirs),
454 452 )
455 453
456 454 match = matchmod.match(util.localpath(b'/repo'), b'', [b'path:a/b/'])
457 455 dirs = [s._dir for s in m.walksubtrees(matcher=match)]
458 456 self.assertEqual(sorted([b'a/b/', b'a/b/c/', b'a/b/d/']), sorted(dirs))
459 457
460 458
461 459 if __name__ == '__main__':
462 460 silenttestrunner.main(__name__)
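The manifest fixtures exercised above (A_SHORT_MANIFEST, A_DEEPER_MANIFEST, A_HUGE_MANIFEST) all follow the same text layout: one entry per line consisting of the path, a NUL byte, a 40-character hex nodeid (for the 20-byte-node case), and an optional flag character, sorted by path and ending in a newline; testReversedLines, testNoTerminalNewline and testNoNewLineAtAll verify exactly those constraints. A small illustrative builder for such entries, not part of the manifest module itself:

    def manifestline(path, hexnode, flag=b''):
        # b'path\0<40 hex chars><flag>\n'
        assert len(hexnode) == 40
        return path + b'\0' + hexnode + flag + b'\n'

    text = b''.join(sorted([
        manifestline(b'foo', b'1' * 40),
        manifestline(b'bar/baz/qux.py', b'f' * 40, b'l'),
    ]))
    # text now has the same shape as A_SHORT_MANIFEST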
@@ -1,283 +1,278
1 1 # This is a randomized test that generates different pathnames every
2 2 # time it is invoked, and tests the encoding of those pathnames.
3 3 #
4 4 # It uses a simple probabilistic model to generate valid pathnames
5 5 # that have proven likely to expose bugs and divergent behavior in
6 6 # different encoding implementations.
7 7
8 8
9 9 import binascii
10 10 import collections
11 11 import itertools
12 12 import math
13 13 import os
14 14 import random
15 15 import sys
16 16 import time
17 17 from mercurial import (
18 18 pycompat,
19 19 store,
20 20 )
21 21
22 try:
23 xrange
24 except NameError:
25 xrange = range
26
27 22 validchars = set(map(pycompat.bytechr, range(0, 256)))
28 23 alphanum = range(ord('A'), ord('Z'))
29 24
30 25 for c in (b'\0', b'/'):
31 26 validchars.remove(c)
32 27
33 28 winreserved = (
34 29 b'aux con prn nul'.split()
35 + [b'com%d' % i for i in xrange(1, 10)]
36 + [b'lpt%d' % i for i in xrange(1, 10)]
30 + [b'com%d' % i for i in range(1, 10)]
31 + [b'lpt%d' % i for i in range(1, 10)]
37 32 )
38 33
39 34
40 35 def casecombinations(names):
41 36 '''Build all case-diddled combinations of names.'''
42 37
43 38 combos = set()
44 39
45 40 for r in names:
46 for i in xrange(len(r) + 1):
47 for c in itertools.combinations(xrange(len(r)), i):
41 for i in range(len(r) + 1):
42 for c in itertools.combinations(range(len(r)), i):
48 43 d = r
49 44 for j in c:
50 45 d = b''.join((d[:j], d[j : j + 1].upper(), d[j + 1 :]))
51 46 combos.add(d)
52 47 return sorted(combos)
53 48
54 49
55 50 def buildprobtable(fp, cmd='hg manifest tip'):
56 51 """Construct and print a table of probabilities for path name
57 52 components. The numbers are percentages."""
58 53
59 54 counts = collections.defaultdict(lambda: 0)
60 55 for line in os.popen(cmd).read().splitlines():
61 56 if line[-2:] in ('.i', '.d'):
62 57 line = line[:-2]
63 58 if line.startswith('data/'):
64 59 line = line[5:]
65 60 for c in line:
66 61 counts[c] += 1
67 62 for c in '\r/\n':
68 63 counts.pop(c, None)
69 64 t = sum(counts.values()) / 100.0
70 65 fp.write('probtable = (')
71 66 for i, (k, v) in enumerate(
72 67 sorted(counts.items(), key=lambda x: x[1], reverse=True)
73 68 ):
74 69 if (i % 5) == 0:
75 70 fp.write('\n ')
76 71 vt = v / t
77 72 if vt < 0.0005:
78 73 break
79 74 fp.write('(%r, %.03f), ' % (k, vt))
80 75 fp.write('\n )\n')
81 76
82 77
83 78 # A table of character frequencies (as percentages), gleaned by
84 79 # looking at filelog names from a real-world, very large repo.
85 80
86 81 probtable = (
87 82 (b't', 9.828),
88 83 (b'e', 9.042),
89 84 (b's', 8.011),
90 85 (b'a', 6.801),
91 86 (b'i', 6.618),
92 87 (b'g', 5.053),
93 88 (b'r', 5.030),
94 89 (b'o', 4.887),
95 90 (b'p', 4.363),
96 91 (b'n', 4.258),
97 92 (b'l', 3.830),
98 93 (b'h', 3.693),
99 94 (b'_', 3.659),
100 95 (b'.', 3.377),
101 96 (b'm', 3.194),
102 97 (b'u', 2.364),
103 98 (b'd', 2.296),
104 99 (b'c', 2.163),
105 100 (b'b', 1.739),
106 101 (b'f', 1.625),
107 102 (b'6', 0.666),
108 103 (b'j', 0.610),
109 104 (b'y', 0.554),
110 105 (b'x', 0.487),
111 106 (b'w', 0.477),
112 107 (b'k', 0.476),
113 108 (b'v', 0.473),
114 109 (b'3', 0.336),
115 110 (b'1', 0.335),
116 111 (b'2', 0.326),
117 112 (b'4', 0.310),
118 113 (b'5', 0.305),
119 114 (b'9', 0.302),
120 115 (b'8', 0.300),
121 116 (b'7', 0.299),
122 117 (b'q', 0.298),
123 118 (b'0', 0.250),
124 119 (b'z', 0.223),
125 120 (b'-', 0.118),
126 121 (b'C', 0.095),
127 122 (b'T', 0.087),
128 123 (b'F', 0.085),
129 124 (b'B', 0.077),
130 125 (b'S', 0.076),
131 126 (b'P', 0.076),
132 127 (b'L', 0.059),
133 128 (b'A', 0.058),
134 129 (b'N', 0.051),
135 130 (b'D', 0.049),
136 131 (b'M', 0.046),
137 132 (b'E', 0.039),
138 133 (b'I', 0.035),
139 134 (b'R', 0.035),
140 135 (b'G', 0.028),
141 136 (b'U', 0.026),
142 137 (b'W', 0.025),
143 138 (b'O', 0.017),
144 139 (b'V', 0.015),
145 140 (b'H', 0.013),
146 141 (b'Q', 0.011),
147 142 (b'J', 0.007),
148 143 (b'K', 0.005),
149 144 (b'+', 0.004),
150 145 (b'X', 0.003),
151 146 (b'Y', 0.001),
152 147 )
153 148
154 149 for c, _ in probtable:
155 150 validchars.remove(c)
156 151 validchars = list(validchars)
157 152
158 153
159 154 def pickfrom(rng, table):
160 155 c = 0
161 156 r = rng.random() * sum(i[1] for i in table)
162 157 for i, p in table:
163 158 c += p
164 159 if c >= r:
165 160 return i
166 161
167 162
168 163 reservedcombos = casecombinations(winreserved)
169 164
170 165 # The first component of a name following a slash.
171 166
172 167 firsttable = (
173 168 (lambda rng: pickfrom(rng, probtable), 90),
174 169 (lambda rng: rng.choice(validchars), 5),
175 170 (lambda rng: rng.choice(reservedcombos), 5),
176 171 )
177 172
178 173 # Components of a name following the first.
179 174
180 175 resttable = firsttable[:-1]
181 176
182 177 # Special suffixes.
183 178
184 179 internalsuffixcombos = casecombinations(b'.hg .i .d'.split())
185 180
186 181 # The last component of a path, before a slash or at the end of a name.
187 182
188 183 lasttable = resttable + (
189 184 (lambda rng: b'', 95),
190 185 (lambda rng: rng.choice(internalsuffixcombos), 5),
191 186 )
192 187
193 188
194 189 def makepart(rng, k):
195 190 '''Construct a part of a pathname, without slashes.'''
196 191
197 192 p = pickfrom(rng, firsttable)(rng)
198 193 l = len(p)
199 194 ps = [p]
200 195 maxl = rng.randint(1, k)
201 196 while l < maxl:
202 197 p = pickfrom(rng, resttable)(rng)
203 198 l += len(p)
204 199 ps.append(p)
205 200 ps.append(pickfrom(rng, lasttable)(rng))
206 201 return b''.join(ps)
207 202
208 203
209 204 def makepath(rng, j, k):
210 205 '''Construct a complete pathname.'''
211 206
212 207 return (
213 208 b'data/'
214 + b'/'.join(makepart(rng, k) for _ in xrange(j))
209 + b'/'.join(makepart(rng, k) for _ in range(j))
215 210 + rng.choice([b'.d', b'.i'])
216 211 )
217 212
218 213
219 214 def genpath(rng, count):
220 215 '''Generate random pathnames with gradually increasing lengths.'''
221 216
222 217 mink, maxk = 1, 4096
223 218
224 219 def steps():
225 for i in xrange(count):
220 for i in range(count):
226 221 yield mink + int(round(math.sqrt((maxk - mink) * float(i) / count)))
227 222
228 223 for k in steps():
229 224 x = rng.randint(1, k)
230 225 y = rng.randint(1, k)
231 226 yield makepath(rng, x, y)
232 227
233 228
234 229 def runtests(rng, seed, count):
235 230 nerrs = 0
236 231 for p in genpath(rng, count):
237 232 h = store._pathencode(p) # uses C implementation, if available
238 233 r = store._hybridencode(p, True) # reference implementation in Python
239 234 if h != r:
240 235 if nerrs == 0:
241 236 print('seed:', hex(seed)[:-1], file=sys.stderr)
242 237 print("\np: '%s'" % p.encode("string_escape"), file=sys.stderr)
243 238 print("h: '%s'" % h.encode("string_escape"), file=sys.stderr)
244 239 print("r: '%s'" % r.encode("string_escape"), file=sys.stderr)
245 240 nerrs += 1
246 241 return nerrs
247 242
248 243
249 244 def main():
250 245 import getopt
251 246
252 247 # Empirically observed to take about a second to run
253 248 count = 100
254 249 seed = None
255 250 opts, args = getopt.getopt(
256 251 sys.argv[1:], 'c:s:', ['build', 'count=', 'seed=']
257 252 )
258 253 for o, a in opts:
259 254 if o in ('-c', '--count'):
260 255 count = int(a)
261 256 elif o in ('-s', '--seed'):
262 257 seed = int(a, base=0) # accepts base 10 or 16 strings
263 258 elif o == '--build':
264 259 buildprobtable(
265 260 sys.stdout,
266 261 'find .hg/store/data -type f && '
267 262 'cat .hg/store/fncache 2>/dev/null',
268 263 )
269 264 sys.exit(0)
270 265
271 266 if seed is None:
272 267 try:
273 268 seed = int(binascii.hexlify(os.urandom(16)), 16)
274 269 except AttributeError:
275 270 seed = int(time.time() * 1000)
276 271
277 272 rng = random.Random(seed)
278 273 if runtests(rng, seed, count):
279 274 sys.exit(1)
280 275
281 276
282 277 if __name__ == '__main__':
283 278 main()
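pickfrom() above implements a weighted random draw by accumulating the weights in probtable by hand. With the Python 2 shims gone, an equivalent draw could also be expressed with random.choices (available since Python 3.6); a rough, illustrative equivalent only, since the test keeps the hand-rolled version:

    import random

    def pickfrom_choices(rng, table):
        # table is a sequence of (value, weight) pairs, like probtable
        values = [v for v, _ in table]
        weights = [w for _, w in table]
        return rng.choices(values, weights=weights, k=1)[0]

    rng = random.Random(0)
    # e.g. pickfrom_choices(rng, ((b'a', 9.0), (b'b', 1.0))) returns b'a' most of the time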
@@ -1,125 +1,123
1 1 #require no-windows
2 2
3 3 $ . "$TESTDIR/remotefilelog-library.sh"
4 4
5 5 $ hg init repo
6 6 $ cd repo
7 7 $ cat >> .hg/hgrc <<EOF
8 8 > [remotefilelog]
9 9 > server=True
10 10 > EOF
11 11 $ echo x > x
12 12 $ echo y > y
13 13 $ echo z > z
14 14 $ hg commit -qAm xy
15 15 $ cd ..
16 16
17 17 $ cat > cacheprocess-logger.py <<EOF
18 18 > import os
19 19 > import shutil
20 20 > import sys
21 > if sys.version_info[0] > 2:
22 > xrange = range
23 21 > f = open('$TESTTMP/cachelog.log', 'w')
24 22 > srccache = os.path.join('$TESTTMP', 'oldhgcache')
25 23 > def log(message):
26 24 > f.write(message)
27 25 > f.flush()
28 26 > destcache = sys.argv[-1]
29 27 > try:
30 28 > while True:
31 29 > cmd = sys.stdin.readline().strip()
32 30 > log('got command %r\n' % cmd)
33 31 > if cmd == 'exit':
34 32 > sys.exit(0)
35 33 > elif cmd == 'get':
36 34 > count = int(sys.stdin.readline())
37 35 > log('client wants %r blobs\n' % count)
38 36 > wants = []
39 > for _ in xrange(count):
37 > for _ in range(count):
40 38 > key = sys.stdin.readline()[:-1]
41 39 > wants.append(key)
42 40 > if '\0' in key:
43 41 > _, key = key.split('\0')
44 42 > srcpath = os.path.join(srccache, key)
45 43 > if os.path.exists(srcpath):
46 44 > dest = os.path.join(destcache, key)
47 45 > destdir = os.path.dirname(dest)
48 46 > if not os.path.exists(destdir):
49 47 > os.makedirs(destdir)
50 48 > shutil.copyfile(srcpath, dest)
51 49 > else:
52 50 > # report a cache miss
53 51 > sys.stdout.write(key + '\n')
54 52 > sys.stdout.write('0\n')
55 53 > for key in sorted(wants):
56 54 > log('requested %r\n' % key)
57 55 > sys.stdout.flush()
58 56 > elif cmd == 'set':
59 57 > raise Exception('todo writing')
60 58 > else:
61 59 > raise Exception('unknown command! %r' % cmd)
62 60 > except Exception as e:
63 61 > log('Exception! %s\n' % e)
64 62 > raise
65 63 > EOF
66 64
67 65 $ cat >> $HGRCPATH <<EOF
68 66 > [remotefilelog]
69 67 > cacheprocess = "$PYTHON" $TESTTMP/cacheprocess-logger.py
70 68 > EOF
71 69
72 70 Test cache keys and cache misses.
73 71 $ hgcloneshallow ssh://user@dummy/repo clone -q
74 72 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
75 73 $ cat cachelog.log
76 74 got command 'get'
77 75 client wants 3 blobs
78 76 requested 'master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
79 77 requested 'master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
80 78 requested 'master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
81 79 got command 'set'
82 80 Exception! todo writing
83 81
84 82 Test cache hits.
85 83 $ mv hgcache oldhgcache
86 84 $ rm cachelog.log
87 85 $ hgcloneshallow ssh://user@dummy/repo clone-cachehit -q
88 86 3 files fetched over 1 fetches - (0 misses, 100.00% hit ratio) over *s (glob)
89 87 $ cat cachelog.log | grep -v exit
90 88 got command 'get'
91 89 client wants 3 blobs
92 90 requested 'master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
93 91 requested 'master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
94 92 requested 'master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
95 93
96 94 $ cat >> $HGRCPATH <<EOF
97 95 > [remotefilelog]
98 96 > cacheprocess.includepath = yes
99 97 > EOF
100 98
101 99 Test cache keys and cache misses with includepath.
102 100 $ rm -r hgcache oldhgcache
103 101 $ rm cachelog.log
104 102 $ hgcloneshallow ssh://user@dummy/repo clone-withpath -q
105 103 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
106 104 $ cat cachelog.log
107 105 got command 'get'
108 106 client wants 3 blobs
109 107 requested 'x\x00master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
110 108 requested 'y\x00master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
111 109 requested 'z\x00master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
112 110 got command 'set'
113 111 Exception! todo writing
114 112
115 113 Test cache hits with includepath.
116 114 $ mv hgcache oldhgcache
117 115 $ rm cachelog.log
118 116 $ hgcloneshallow ssh://user@dummy/repo clone-withpath-cachehit -q
119 117 3 files fetched over 1 fetches - (0 misses, 100.00% hit ratio) over *s (glob)
120 118 $ cat cachelog.log | grep -v exit
121 119 got command 'get'
122 120 client wants 3 blobs
123 121 requested 'x\x00master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
124 122 requested 'y\x00master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
125 123 requested 'z\x00master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
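The cacheprocess-logger.py script above speaks the cache-process protocol over stdin/stdout: it reads a command line ('get', 'set' or 'exit'), then for 'get' a blob count followed by that many keys, and writes back any missing keys terminated by a line containing '0'. A hedged sketch of driving one 'get' round trip by hand (the real client is remotefilelog itself; the key below is a placeholder):

    import subprocess

    proc = subprocess.Popen(
        ['python3', 'cacheprocess-logger.py', 'destcache'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
    keys = ['master/00/placeholder/key']
    proc.stdin.write('get\n%d\n' % len(keys))
    proc.stdin.writelines(k + '\n' for k in keys)
    proc.stdin.flush()
    while proc.stdout.readline().strip() != '0':
        pass                    # lines before '0' are reported cache misses
    proc.stdin.write('exit\n')
    proc.stdin.flush()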
@@ -1,67 +1,64
1 1 import os
2 2 from mercurial import (
3 3 pycompat,
4 4 ui as uimod,
5 5 )
6 6
7 if pycompat.ispy3:
8 xrange = range
9
10 7 hgrc = os.environ['HGRCPATH']
11 8 f = open(hgrc)
12 9 basehgrc = f.read()
13 10 f.close()
14 11
15 12 print(' hgrc settings command line options final result ')
16 13 print(' quiet verbo debug quiet verbo debug quiet verbo debug')
17 14
18 for i in xrange(64):
15 for i in range(64):
19 16 hgrc_quiet = bool(i & 1 << 0)
20 17 hgrc_verbose = bool(i & 1 << 1)
21 18 hgrc_debug = bool(i & 1 << 2)
22 19 cmd_quiet = bool(i & 1 << 3)
23 20 cmd_verbose = bool(i & 1 << 4)
24 21 cmd_debug = bool(i & 1 << 5)
25 22
26 23 f = open(hgrc, 'w')
27 24 f.write(basehgrc)
28 25 f.write('\n[ui]\n')
29 26 if hgrc_quiet:
30 27 f.write('quiet = True\n')
31 28 if hgrc_verbose:
32 29 f.write('verbose = True\n')
33 30 if hgrc_debug:
34 31 f.write('debug = True\n')
35 32 f.close()
36 33
37 34 u = uimod.ui.load()
38 35 if cmd_quiet or cmd_debug or cmd_verbose:
39 36 u.setconfig(b'ui', b'quiet', pycompat.bytestr(bool(cmd_quiet)))
40 37 u.setconfig(b'ui', b'verbose', pycompat.bytestr(bool(cmd_verbose)))
41 38 u.setconfig(b'ui', b'debug', pycompat.bytestr(bool(cmd_debug)))
42 39
43 40 check = ''
44 41 if u.debugflag:
45 42 if not u.verbose or u.quiet:
46 43 check = ' *'
47 44 elif u.verbose and u.quiet:
48 45 check = ' +'
49 46
50 47 print(
51 48 (
52 49 '%2d %5s %5s %5s %5s %5s %5s -> %5s %5s %5s%s'
53 50 % (
54 51 i,
55 52 hgrc_quiet,
56 53 hgrc_verbose,
57 54 hgrc_debug,
58 55 cmd_quiet,
59 56 cmd_verbose,
60 57 cmd_debug,
61 58 u.quiet,
62 59 u.verbose,
63 60 u.debugflag,
64 61 check,
65 62 )
66 63 )
67 64 )
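The loop above enumerates all 64 combinations of the six verbosity switches by treating the loop index as a bit field, one bit per hgrc or command-line flag. The same decoding, pulled out for illustration using the script's own flag names:

    FLAGS = ('hgrc_quiet', 'hgrc_verbose', 'hgrc_debug',
             'cmd_quiet', 'cmd_verbose', 'cmd_debug')

    def decode(i):
        # bit j of i switches on FLAGS[j], so range(64) covers every combination
        return {name: bool(i & (1 << j)) for j, name in enumerate(FLAGS)}

    assert decode(0b000101) == {
        'hgrc_quiet': True, 'hgrc_verbose': False, 'hgrc_debug': True,
        'cmd_quiet': False, 'cmd_verbose': False, 'cmd_debug': False,
    }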