testing: allow Hypothesis tests to disable extensions...
David R. MacIver
r28279:c1fbc92d default
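In short, this changeset moves extension handling out of each repository's .hg/hgrc and into per-repository --config flags: the state machine records enabled extensions in self.configperrepo, hg() expands that mapping into "--config extensions.<name>=" arguments, and teardown() regenerates the recorded .t test with a given extension's flag stripped to check the run still passes without it. A minimal sketch of that flag handling, assuming only what the diff below shows (the helper names hg_flags and strip_extension_flag are illustrative, not part of the changeset):

    def extensionconfigkey(extension):
        # Same helper the changeset adds: the config key an extension lives under.
        return "extensions." + extension

    def hg_flags(config):
        # Expand a per-repo {key: value} mapping into --config arguments,
        # as the new hg() method does before invoking the command.
        flags = []
        for key, value in config.items():
            flags.append("--config")
            flags.append("%s=%s" % (key, value))
        return flags

    def strip_extension_flag(line, extension):
        # Drop one extension's flag from a recorded "hg" line, the way
        # teardown() builds the test-generated-no-<extension>.t variants.
        if line.startswith("  $ hg"):
            line = line.replace(
                "--config %s=" % (extensionconfigkey(extension),), "")
        return line

    print(hg_flags({extensionconfigkey("shelve"): ""}))
    print(strip_extension_flag("  $ hg --config extensions.shelve= shelve", "shelve"))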
@@ -1,593 +1,597 @@
1 from __future__ import print_function, absolute_import
1 from __future__ import print_function, absolute_import
2
2
3 """Fuzz testing for operations against a Mercurial repository
3 """Fuzz testing for operations against a Mercurial repository
4
4
5 This uses Hypothesis's stateful testing to generate random repository
5 This uses Hypothesis's stateful testing to generate random repository
6 operations and test Mercurial using them, both to see if there are any
6 operations and test Mercurial using them, both to see if there are any
7 unexpected errors and to compare different versions of it."""
7 unexpected errors and to compare different versions of it."""
8
8
9 import os
9 import os
10 import sys
10 import sys
11
11
12 # These tests require Hypothesis and pytz to be installed.
12 # These tests require Hypothesis and pytz to be installed.
13 # Running 'pip install hypothesis pytz' will achieve that.
13 # Running 'pip install hypothesis pytz' will achieve that.
14 # Note: This won't work if you're running Python < 2.7.
14 # Note: This won't work if you're running Python < 2.7.
15 try:
15 try:
16 from hypothesis.extra.datetime import datetimes
16 from hypothesis.extra.datetime import datetimes
17 except ImportError:
17 except ImportError:
18 sys.stderr.write("skipped: hypothesis or pytz not installed" + os.linesep)
18 sys.stderr.write("skipped: hypothesis or pytz not installed" + os.linesep)
19 sys.exit(80)
19 sys.exit(80)
20
20
21 # If you are running an old version of pip you may find that the enum34
21 # If you are running an old version of pip you may find that the enum34
22 # backport is not installed automatically. If so 'pip install enum34' will
22 # backport is not installed automatically. If so 'pip install enum34' will
23 # fix this problem.
23 # fix this problem.
24 try:
24 try:
25 import enum
25 import enum
26 assert enum # Silence pyflakes
26 assert enum # Silence pyflakes
27 except ImportError:
27 except ImportError:
28 sys.stderr.write("skipped: enum34 not installed" + os.linesep)
28 sys.stderr.write("skipped: enum34 not installed" + os.linesep)
29 sys.exit(80)
29 sys.exit(80)
30
30
31 import binascii
31 import binascii
32 from contextlib import contextmanager
32 from contextlib import contextmanager
33 import errno
33 import errno
34 import pipes
34 import pipes
35 import shutil
35 import shutil
36 import silenttestrunner
36 import silenttestrunner
37 import subprocess
37 import subprocess
38
38
39 from hypothesis.errors import HypothesisException
39 from hypothesis.errors import HypothesisException
40 from hypothesis.stateful import (
40 from hypothesis.stateful import (
41 rule, RuleBasedStateMachine, Bundle, precondition)
41 rule, RuleBasedStateMachine, Bundle, precondition)
42 from hypothesis import settings, note, strategies as st
42 from hypothesis import settings, note, strategies as st
43 from hypothesis.configuration import set_hypothesis_home_dir
43 from hypothesis.configuration import set_hypothesis_home_dir
44 from hypothesis.database import ExampleDatabase
44 from hypothesis.database import ExampleDatabase
45
45
46 testdir = os.path.abspath(os.environ["TESTDIR"])
46 testdir = os.path.abspath(os.environ["TESTDIR"])
47
47
48 # We store Hypothesis examples here rather in the temporary test directory
48 # We store Hypothesis examples here rather in the temporary test directory
49 # so that when rerunning a failing test this always results in refinding the
49 # so that when rerunning a failing test this always results in refinding the
50 # previous failure. This directory is in .hgignore and should not be checked in
50 # previous failure. This directory is in .hgignore and should not be checked in
51 # but is useful to have for development.
51 # but is useful to have for development.
52 set_hypothesis_home_dir(os.path.join(testdir, ".hypothesis"))
52 set_hypothesis_home_dir(os.path.join(testdir, ".hypothesis"))
53
53
54 runtests = os.path.join(os.environ["RUNTESTDIR"], "run-tests.py")
54 runtests = os.path.join(os.environ["RUNTESTDIR"], "run-tests.py")
55 testtmp = os.environ["TESTTMP"]
55 testtmp = os.environ["TESTTMP"]
56 assert os.path.isdir(testtmp)
56 assert os.path.isdir(testtmp)
57
57
58 generatedtests = os.path.join(testdir, "hypothesis-generated")
58 generatedtests = os.path.join(testdir, "hypothesis-generated")
59
59
60 try:
60 try:
61 os.makedirs(generatedtests)
61 os.makedirs(generatedtests)
62 except OSError:
62 except OSError:
63 pass
63 pass
64
64
65 # We write out generated .t files to a file in order to ease debugging and to
65 # We write out generated .t files to a file in order to ease debugging and to
66 # give a starting point for turning failures Hypothesis finds into normal
66 # give a starting point for turning failures Hypothesis finds into normal
67 # tests. In order to ensure that multiple copies of this test can be run in
67 # tests. In order to ensure that multiple copies of this test can be run in
68 # parallel we use atomic file create to ensure that we always get a unique
68 # parallel we use atomic file create to ensure that we always get a unique
69 # name.
69 # name.
70 file_index = 0
70 file_index = 0
71 while True:
71 while True:
72 file_index += 1
72 file_index += 1
73 savefile = os.path.join(generatedtests, "test-generated-%d.t" % (
73 savefile = os.path.join(generatedtests, "test-generated-%d.t" % (
74 file_index,
74 file_index,
75 ))
75 ))
76 try:
76 try:
77 os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
77 os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
78 break
78 break
79 except OSError as e:
79 except OSError as e:
80 if e.errno != errno.EEXIST:
80 if e.errno != errno.EEXIST:
81 raise
81 raise
82 assert os.path.exists(savefile)
82 assert os.path.exists(savefile)
83
83
84 hgrc = os.path.join(".hg", "hgrc")
84 hgrc = os.path.join(".hg", "hgrc")
85
85
86 filecharacters = (
86 filecharacters = (
87 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
87 "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
88 "[]^_`;=@{}~ !#$%&'()+,-"
88 "[]^_`;=@{}~ !#$%&'()+,-"
89 )
89 )
90
90
91 files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter(
91 files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter(
92 bool).map(lambda s: s.encode('ascii'))
92 bool).map(lambda s: s.encode('ascii'))
93
93
94 safetext = st.text(st.characters(
94 safetext = st.text(st.characters(
95 min_codepoint=1, max_codepoint=127,
95 min_codepoint=1, max_codepoint=127,
96 blacklist_categories=('Cc', 'Cs')), min_size=1).map(
96 blacklist_categories=('Cc', 'Cs')), min_size=1).map(
97 lambda s: s.encode('utf-8')
97 lambda s: s.encode('utf-8')
98 )
98 )
99
99
100 extensions = st.sampled_from(('shelve', 'mq', 'blackbox',))
101
100 @contextmanager
102 @contextmanager
101 def acceptableerrors(*args):
103 def acceptableerrors(*args):
102 """Sometimes we know an operation we're about to perform might fail, and
104 """Sometimes we know an operation we're about to perform might fail, and
103 we're OK with some of the failures. In those cases this may be used as a
105 we're OK with some of the failures. In those cases this may be used as a
104 context manager and will swallow expected failures, as identified by
106 context manager and will swallow expected failures, as identified by
105 substrings of the error message Mercurial emits."""
107 substrings of the error message Mercurial emits."""
106 try:
108 try:
107 yield
109 yield
108 except subprocess.CalledProcessError as e:
110 except subprocess.CalledProcessError as e:
109 if not any(a in e.output for a in args):
111 if not any(a in e.output for a in args):
110 note(e.output)
112 note(e.output)
111 raise
113 raise
112
114
113 reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map(
115 reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map(
114 lambda s: s.encode('ascii')
116 lambda s: s.encode('ascii')
115 )
117 )
116
118
117 class verifyingstatemachine(RuleBasedStateMachine):
119 class verifyingstatemachine(RuleBasedStateMachine):
118 """This defines the set of acceptable operations on a Mercurial repository
120 """This defines the set of acceptable operations on a Mercurial repository
119 using Hypothesis's RuleBasedStateMachine.
121 using Hypothesis's RuleBasedStateMachine.
120
122
121 The general concept is that we manage multiple repositories inside a
123 The general concept is that we manage multiple repositories inside a
122 repos/ directory in our temporary test location. Some of these are freshly
124 repos/ directory in our temporary test location. Some of these are freshly
123 inited, some are clones of the others. Our current working directory is
125 inited, some are clones of the others. Our current working directory is
124 always inside one of these repositories while the tests are running.
126 always inside one of these repositories while the tests are running.
125
127
126 Hypothesis then performs a series of operations against these repositories,
128 Hypothesis then performs a series of operations against these repositories,
127 including hg commands, generating contents and editing the .hgrc file.
129 including hg commands, generating contents and editing the .hgrc file.
128 If these operations fail in unexpected ways or behave differently in
130 If these operations fail in unexpected ways or behave differently in
129 different configurations of Mercurial, the test will fail and a minimized
131 different configurations of Mercurial, the test will fail and a minimized
130 .t test file will be written to the hypothesis-generated directory to
132 .t test file will be written to the hypothesis-generated directory to
131 exhibit that failure.
133 exhibit that failure.
132
134
133 Operations are defined as methods with @rule() decorators. See the
135 Operations are defined as methods with @rule() decorators. See the
134 Hypothesis documentation at
136 Hypothesis documentation at
135 http://hypothesis.readthedocs.org/en/release/stateful.html for more
137 http://hypothesis.readthedocs.org/en/release/stateful.html for more
136 details."""
138 details."""
137
139
138 # A bundle is a reusable collection of previously generated data which may
140 # A bundle is a reusable collection of previously generated data which may
139 # be provided as arguments to future operations.
141 # be provided as arguments to future operations.
140 repos = Bundle('repos')
142 repos = Bundle('repos')
141 paths = Bundle('paths')
143 paths = Bundle('paths')
142 contents = Bundle('contents')
144 contents = Bundle('contents')
143 branches = Bundle('branches')
145 branches = Bundle('branches')
144 committimes = Bundle('committimes')
146 committimes = Bundle('committimes')
145
147
146 def __init__(self):
148 def __init__(self):
147 super(verifyingstatemachine, self).__init__()
149 super(verifyingstatemachine, self).__init__()
148 self.repodir = os.path.join(testtmp, "repos")
150 self.repodir = os.path.join(testtmp, "repos")
149 if os.path.exists(self.repodir):
151 if os.path.exists(self.repodir):
150 shutil.rmtree(self.repodir)
152 shutil.rmtree(self.repodir)
151 os.chdir(testtmp)
153 os.chdir(testtmp)
152 self.log = []
154 self.log = []
153 self.failed = False
155 self.failed = False
156 self.configperrepo = {}
157 self.all_extensions = set()
158 self.non_skippable_extensions = set()
154
159
155 self.mkdirp("repos")
160 self.mkdirp("repos")
156 self.cd("repos")
161 self.cd("repos")
157 self.mkdirp("repo1")
162 self.mkdirp("repo1")
158 self.cd("repo1")
163 self.cd("repo1")
159 self.hg("init")
164 self.hg("init")
160 self.extensions = {}
161 self.all_extensions = set()
162 self.non_skippable_extensions = set()
163
165
164 def teardown(self):
166 def teardown(self):
165 """On teardown we clean up after ourselves as usual, but we also
167 """On teardown we clean up after ourselves as usual, but we also
166 do some additional testing: We generate a .t file based on our test
168 do some additional testing: We generate a .t file based on our test
167 run using run-test.py -i to get the correct output.
169 run using run-test.py -i to get the correct output.
168
170
169 We then test it in a number of other configurations, verifying that
171 We then test it in a number of other configurations, verifying that
170 each passes the same test."""
172 each passes the same test."""
171 super(verifyingstatemachine, self).teardown()
173 super(verifyingstatemachine, self).teardown()
172 try:
174 try:
173 shutil.rmtree(self.repodir)
175 shutil.rmtree(self.repodir)
174 except OSError:
176 except OSError:
175 pass
177 pass
176 ttest = os.linesep.join(" " + l for l in self.log)
178 ttest = os.linesep.join(" " + l for l in self.log)
177 os.chdir(testtmp)
179 os.chdir(testtmp)
178 path = os.path.join(testtmp, "test-generated.t")
180 path = os.path.join(testtmp, "test-generated.t")
179 with open(path, 'w') as o:
181 with open(path, 'w') as o:
180 o.write(ttest + os.linesep)
182 o.write(ttest + os.linesep)
181 with open(os.devnull, "w") as devnull:
183 with open(os.devnull, "w") as devnull:
182 rewriter = subprocess.Popen(
184 rewriter = subprocess.Popen(
183 [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
185 [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
184 stdout=devnull, stderr=devnull,
186 stdout=devnull, stderr=devnull,
185 )
187 )
186 rewriter.communicate("yes")
188 rewriter.communicate("yes")
187 with open(path, 'r') as i:
189 with open(path, 'r') as i:
188 ttest = i.read()
190 ttest = i.read()
189
191
190 e = None
192 e = None
191 if not self.failed:
193 if not self.failed:
192 try:
194 try:
193 for ext in (
194 self.all_extensions - self.non_skippable_extensions
195 ):
196 try:
197 os.environ["SKIP_EXTENSION"] = ext
198 output = subprocess.check_output([
199 runtests, path, "--local",
200 ], stderr=subprocess.STDOUT)
201 assert "Ran 1 test" in output, output
202 finally:
203 del os.environ["SKIP_EXTENSION"]
204 output = subprocess.check_output([
195 output = subprocess.check_output([
205 runtests, path, "--local", "--pure"
196 runtests, path, "--local", "--pure"
206 ], stderr=subprocess.STDOUT)
197 ], stderr=subprocess.STDOUT)
207 assert "Ran 1 test" in output, output
198 assert "Ran 1 test" in output, output
199 for ext in (
200 self.all_extensions - self.non_skippable_extensions
201 ):
202 tf = os.path.join(testtmp, "test-generated-no-%s.t" % (
203 ext,
204 ))
205 with open(tf, 'w') as o:
206 for l in ttest.splitlines():
207 if l.startswith(" $ hg"):
208 l = l.replace(
209 "--config %s=" % (
210 extensionconfigkey(ext),), "")
211 o.write(l + os.linesep)
212 with open(tf, 'r') as r:
213 t = r.read()
214 assert ext not in t, t
215 output = subprocess.check_output([
216 runtests, tf, "--local",
217 ], stderr=subprocess.STDOUT)
218 assert "Ran 1 test" in output, output
208 except subprocess.CalledProcessError as e:
219 except subprocess.CalledProcessError as e:
209 note(e.output)
220 note(e.output)
210 finally:
211 os.unlink(path)
212 try:
213 os.unlink(path + ".err")
214 except OSError:
215 pass
216 if self.failed or e is not None:
221 if self.failed or e is not None:
217 with open(savefile, "wb") as o:
222 with open(savefile, "wb") as o:
218 o.write(ttest)
223 o.write(ttest)
219 if e is not None:
224 if e is not None:
220 raise e
225 raise e
221
226
222 def execute_step(self, step):
227 def execute_step(self, step):
223 try:
228 try:
224 return super(verifyingstatemachine, self).execute_step(step)
229 return super(verifyingstatemachine, self).execute_step(step)
225 except (HypothesisException, KeyboardInterrupt):
230 except (HypothesisException, KeyboardInterrupt):
226 raise
231 raise
227 except Exception:
232 except Exception:
228 self.failed = True
233 self.failed = True
229 raise
234 raise
230
235
231 # Section: Basic commands.
236 # Section: Basic commands.
232 def mkdirp(self, path):
237 def mkdirp(self, path):
233 if os.path.exists(path):
238 if os.path.exists(path):
234 return
239 return
235 self.log.append(
240 self.log.append(
236 "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),))
241 "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),))
237 os.makedirs(path)
242 os.makedirs(path)
238
243
239 def cd(self, path):
244 def cd(self, path):
240 path = os.path.relpath(path)
245 path = os.path.relpath(path)
241 if path == ".":
246 if path == ".":
242 return
247 return
243 os.chdir(path)
248 os.chdir(path)
244 self.log.append("$ cd -- %s" % (pipes.quote(path),))
249 self.log.append("$ cd -- %s" % (pipes.quote(path),))
245
250
246 def hg(self, *args):
251 def hg(self, *args):
247 self.command("hg", *args)
252 extra_flags = []
253 for key, value in self.config.items():
254 extra_flags.append("--config")
255 extra_flags.append("%s=%s" % (key, value))
256 self.command("hg", *(tuple(extra_flags) + args))
248
257
249 def command(self, *args):
258 def command(self, *args):
250 self.log.append("$ " + ' '.join(map(pipes.quote, args)))
259 self.log.append("$ " + ' '.join(map(pipes.quote, args)))
251 subprocess.check_output(args, stderr=subprocess.STDOUT)
260 subprocess.check_output(args, stderr=subprocess.STDOUT)
252
261
253 # Section: Set up basic data
262 # Section: Set up basic data
254 # This section has no side effects but generates data that we will want
263 # This section has no side effects but generates data that we will want
255 # to use later.
264 # to use later.
256 @rule(
265 @rule(
257 target=paths,
266 target=paths,
258 source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
267 source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
259 def genpath(self, source):
268 def genpath(self, source):
260 return source
269 return source
261
270
262 @rule(
271 @rule(
263 target=committimes,
272 target=committimes,
264 when=datetimes(min_year=1970, max_year=2038) | st.none())
273 when=datetimes(min_year=1970, max_year=2038) | st.none())
265 def gentime(self, when):
274 def gentime(self, when):
266 return when
275 return when
267
276
268 @rule(
277 @rule(
269 target=contents,
278 target=contents,
270 content=st.one_of(
279 content=st.one_of(
271 st.binary(),
280 st.binary(),
272 st.text().map(lambda x: x.encode('utf-8'))
281 st.text().map(lambda x: x.encode('utf-8'))
273 ))
282 ))
274 def gencontent(self, content):
283 def gencontent(self, content):
275 return content
284 return content
276
285
277 @rule(
286 @rule(
278 target=branches,
287 target=branches,
279 name=safetext,
288 name=safetext,
280 )
289 )
281 def genbranch(self, name):
290 def genbranch(self, name):
282 return name
291 return name
283
292
284 @rule(target=paths, source=paths)
293 @rule(target=paths, source=paths)
285 def lowerpath(self, source):
294 def lowerpath(self, source):
286 return source.lower()
295 return source.lower()
287
296
288 @rule(target=paths, source=paths)
297 @rule(target=paths, source=paths)
289 def upperpath(self, source):
298 def upperpath(self, source):
290 return source.upper()
299 return source.upper()
291
300
292 # Section: Basic path operations
301 # Section: Basic path operations
293 @rule(path=paths, content=contents)
302 @rule(path=paths, content=contents)
294 def writecontent(self, path, content):
303 def writecontent(self, path, content):
295 self.unadded_changes = True
304 self.unadded_changes = True
296 if os.path.isdir(path):
305 if os.path.isdir(path):
297 return
306 return
298 parent = os.path.dirname(path)
307 parent = os.path.dirname(path)
299 if parent:
308 if parent:
300 try:
309 try:
301 self.mkdirp(parent)
310 self.mkdirp(parent)
302 except OSError:
311 except OSError:
303 # It may be the case that there is a regular file that has
312 # It may be the case that there is a regular file that has
304 # previously been created that has the same name as an ancestor
313 # previously been created that has the same name as an ancestor
305 # of the current path. This will cause mkdirp to fail with this
314 # of the current path. This will cause mkdirp to fail with this
306 # error. We just turn this into a no-op in that case.
315 # error. We just turn this into a no-op in that case.
307 return
316 return
308 with open(path, 'wb') as o:
317 with open(path, 'wb') as o:
309 o.write(content)
318 o.write(content)
310 self.log.append((
319 self.log.append((
311 "$ python -c 'import binascii; "
320 "$ python -c 'import binascii; "
312 "print(binascii.unhexlify(\"%s\"))' > %s") % (
321 "print(binascii.unhexlify(\"%s\"))' > %s") % (
313 binascii.hexlify(content),
322 binascii.hexlify(content),
314 pipes.quote(path),
323 pipes.quote(path),
315 ))
324 ))
316
325
317 @rule(path=paths)
326 @rule(path=paths)
318 def addpath(self, path):
327 def addpath(self, path):
319 if os.path.exists(path):
328 if os.path.exists(path):
320 self.hg("add", "--", path)
329 self.hg("add", "--", path)
321
330
322 @rule(path=paths)
331 @rule(path=paths)
323 def forgetpath(self, path):
332 def forgetpath(self, path):
324 if os.path.exists(path):
333 if os.path.exists(path):
325 with acceptableerrors(
334 with acceptableerrors(
326 "file is already untracked",
335 "file is already untracked",
327 ):
336 ):
328 self.hg("forget", "--", path)
337 self.hg("forget", "--", path)
329
338
330 @rule(s=st.none() | st.integers(0, 100))
339 @rule(s=st.none() | st.integers(0, 100))
331 def addremove(self, s):
340 def addremove(self, s):
332 args = ["addremove"]
341 args = ["addremove"]
333 if s is not None:
342 if s is not None:
334 args.extend(["-s", str(s)])
343 args.extend(["-s", str(s)])
335 self.hg(*args)
344 self.hg(*args)
336
345
337 @rule(path=paths)
346 @rule(path=paths)
338 def removepath(self, path):
347 def removepath(self, path):
339 if os.path.exists(path):
348 if os.path.exists(path):
340 with acceptableerrors(
349 with acceptableerrors(
341 'file is untracked',
350 'file is untracked',
342 'file has been marked for add',
351 'file has been marked for add',
343 'file is modified',
352 'file is modified',
344 ):
353 ):
345 self.hg("remove", "--", path)
354 self.hg("remove", "--", path)
346
355
347 @rule(
356 @rule(
348 message=safetext,
357 message=safetext,
349 amend=st.booleans(),
358 amend=st.booleans(),
350 when=committimes,
359 when=committimes,
351 addremove=st.booleans(),
360 addremove=st.booleans(),
352 secret=st.booleans(),
361 secret=st.booleans(),
353 close_branch=st.booleans(),
362 close_branch=st.booleans(),
354 )
363 )
355 def maybecommit(
364 def maybecommit(
356 self, message, amend, when, addremove, secret, close_branch
365 self, message, amend, when, addremove, secret, close_branch
357 ):
366 ):
358 command = ["commit"]
367 command = ["commit"]
359 errors = ["nothing changed"]
368 errors = ["nothing changed"]
360 if amend:
369 if amend:
361 errors.append("cannot amend public changesets")
370 errors.append("cannot amend public changesets")
362 command.append("--amend")
371 command.append("--amend")
363 command.append("-m" + pipes.quote(message))
372 command.append("-m" + pipes.quote(message))
364 if secret:
373 if secret:
365 command.append("--secret")
374 command.append("--secret")
366 if close_branch:
375 if close_branch:
367 command.append("--close-branch")
376 command.append("--close-branch")
368 errors.append("can only close branch heads")
377 errors.append("can only close branch heads")
369 if addremove:
378 if addremove:
370 command.append("--addremove")
379 command.append("--addremove")
371 if when is not None:
380 if when is not None:
372 if when.year == 1970:
381 if when.year == 1970:
373 errors.append('negative date value')
382 errors.append('negative date value')
374 if when.year == 2038:
383 if when.year == 2038:
375 errors.append('exceeds 32 bits')
384 errors.append('exceeds 32 bits')
376 command.append("--date=%s" % (
385 command.append("--date=%s" % (
377 when.strftime('%Y-%m-%d %H:%M:%S %z'),))
386 when.strftime('%Y-%m-%d %H:%M:%S %z'),))
378
387
379 with acceptableerrors(*errors):
388 with acceptableerrors(*errors):
380 self.hg(*command)
389 self.hg(*command)
381
390
382 # Section: Repository management
391 # Section: Repository management
383 @property
392 @property
384 def currentrepo(self):
393 def currentrepo(self):
385 return os.path.basename(os.getcwd())
394 return os.path.basename(os.getcwd())
386
395
396 @property
397 def config(self):
398 return self.configperrepo.setdefault(self.currentrepo, {})
399
387 @rule(
400 @rule(
388 target=repos,
401 target=repos,
389 source=repos,
402 source=repos,
390 name=reponames,
403 name=reponames,
391 )
404 )
392 def clone(self, source, name):
405 def clone(self, source, name):
393 if not os.path.exists(os.path.join("..", name)):
406 if not os.path.exists(os.path.join("..", name)):
394 self.cd("..")
407 self.cd("..")
395 self.hg("clone", source, name)
408 self.hg("clone", source, name)
396 self.cd(name)
409 self.cd(name)
397 return name
410 return name
398
411
399 @rule(
412 @rule(
400 target=repos,
413 target=repos,
401 name=reponames,
414 name=reponames,
402 )
415 )
403 def fresh(self, name):
416 def fresh(self, name):
404 if not os.path.exists(os.path.join("..", name)):
417 if not os.path.exists(os.path.join("..", name)):
405 self.cd("..")
418 self.cd("..")
406 self.mkdirp(name)
419 self.mkdirp(name)
407 self.cd(name)
420 self.cd(name)
408 self.hg("init")
421 self.hg("init")
409 return name
422 return name
410
423
411 @rule(name=repos)
424 @rule(name=repos)
412 def switch(self, name):
425 def switch(self, name):
413 self.cd(os.path.join("..", name))
426 self.cd(os.path.join("..", name))
414 assert self.currentrepo == name
427 assert self.currentrepo == name
415 assert os.path.exists(".hg")
428 assert os.path.exists(".hg")
416
429
417 @rule(target=repos)
430 @rule(target=repos)
418 def origin(self):
431 def origin(self):
419 return "repo1"
432 return "repo1"
420
433
421 @rule()
434 @rule()
422 def pull(self, repo=repos):
435 def pull(self, repo=repos):
423 with acceptableerrors(
436 with acceptableerrors(
424 "repository default not found",
437 "repository default not found",
425 "repository is unrelated",
438 "repository is unrelated",
426 ):
439 ):
427 self.hg("pull")
440 self.hg("pull")
428
441
429 @rule(newbranch=st.booleans())
442 @rule(newbranch=st.booleans())
430 def push(self, newbranch):
443 def push(self, newbranch):
431 with acceptableerrors(
444 with acceptableerrors(
432 "default repository not configured",
445 "default repository not configured",
433 "no changes found",
446 "no changes found",
434 ):
447 ):
435 if newbranch:
448 if newbranch:
436 self.hg("push", "--new-branch")
449 self.hg("push", "--new-branch")
437 else:
450 else:
438 with acceptableerrors(
451 with acceptableerrors(
439 "creates new branches"
452 "creates new branches"
440 ):
453 ):
441 self.hg("push")
454 self.hg("push")
442
455
443 # Section: Simple side effect free "check" operations
456 # Section: Simple side effect free "check" operations
444 @rule()
457 @rule()
445 def log(self):
458 def log(self):
446 self.hg("log")
459 self.hg("log")
447
460
448 @rule()
461 @rule()
449 def verify(self):
462 def verify(self):
450 self.hg("verify")
463 self.hg("verify")
451
464
452 @rule()
465 @rule()
453 def diff(self):
466 def diff(self):
454 self.hg("diff", "--nodates")
467 self.hg("diff", "--nodates")
455
468
456 @rule()
469 @rule()
457 def status(self):
470 def status(self):
458 self.hg("status")
471 self.hg("status")
459
472
460 @rule()
473 @rule()
461 def export(self):
474 def export(self):
462 self.hg("export")
475 self.hg("export")
463
476
464 # Section: Branch management
477 # Section: Branch management
465 @rule()
478 @rule()
466 def checkbranch(self):
479 def checkbranch(self):
467 self.hg("branch")
480 self.hg("branch")
468
481
469 @rule(branch=branches)
482 @rule(branch=branches)
470 def switchbranch(self, branch):
483 def switchbranch(self, branch):
471 with acceptableerrors(
484 with acceptableerrors(
472 'cannot use an integer as a name',
485 'cannot use an integer as a name',
473 'cannot be used in a name',
486 'cannot be used in a name',
474 'a branch of the same name already exists',
487 'a branch of the same name already exists',
475 'is reserved',
488 'is reserved',
476 ):
489 ):
477 self.hg("branch", "--", branch)
490 self.hg("branch", "--", branch)
478
491
479 @rule(branch=branches, clean=st.booleans())
492 @rule(branch=branches, clean=st.booleans())
480 def update(self, branch, clean):
493 def update(self, branch, clean):
481 with acceptableerrors(
494 with acceptableerrors(
482 'unknown revision',
495 'unknown revision',
483 'parse error',
496 'parse error',
484 ):
497 ):
485 if clean:
498 if clean:
486 self.hg("update", "-C", "--", branch)
499 self.hg("update", "-C", "--", branch)
487 else:
500 else:
488 self.hg("update", "--", branch)
501 self.hg("update", "--", branch)
489
502
490 # Section: Extension management
503 # Section: Extension management
491 def hasextension(self, extension):
504 def hasextension(self, extension):
492 repo = self.currentrepo
505 return extensionconfigkey(extension) in self.config
493 return repo in self.extensions and extension in self.extensions[repo]
494
506
495 def commandused(self, extension):
507 def commandused(self, extension):
496 assert extension in self.all_extensions
508 assert extension in self.all_extensions
497 self.non_skippable_extensions.add(extension)
509 self.non_skippable_extensions.add(extension)
498
510
499 @rule(extension=st.sampled_from((
511 @rule(extension=extensions)
500 'shelve', 'mq', 'blackbox',
501 )))
502 def addextension(self, extension):
512 def addextension(self, extension):
503 self.all_extensions.add(extension)
513 self.all_extensions.add(extension)
504 extensions = self.extensions.setdefault(self.currentrepo, set())
514 self.config[extensionconfigkey(extension)] = ""
505 if extension in extensions:
515
506 return
516 @rule(extension=extensions)
507 extensions.add(extension)
517 def removeextension(self, extension):
508 if not os.path.exists(hgrc):
518 self.config.pop(extensionconfigkey(extension), None)
509 self.command("touch", hgrc)
510 with open(hgrc, 'a') as o:
511 line = "[extensions]\n%s=\n" % (extension,)
512 o.write(line)
513 for l in line.splitlines():
514 self.log.append((
515 '$ if test "$SKIP_EXTENSION" != "%s" ; '
516 'then echo %r >> %s; fi') % (
517 extension, l, hgrc,))
518
519
519 # Section: Commands from the shelve extension
520 # Section: Commands from the shelve extension
520 @rule()
521 @rule()
521 @precondition(lambda self: self.hasextension("shelve"))
522 @precondition(lambda self: self.hasextension("shelve"))
522 def shelve(self):
523 def shelve(self):
523 self.commandused("shelve")
524 self.commandused("shelve")
524 with acceptableerrors("nothing changed"):
525 with acceptableerrors("nothing changed"):
525 self.hg("shelve")
526 self.hg("shelve")
526
527
527 @rule()
528 @rule()
528 @precondition(lambda self: self.hasextension("shelve"))
529 @precondition(lambda self: self.hasextension("shelve"))
529 def unshelve(self):
530 def unshelve(self):
530 self.commandused("shelve")
531 self.commandused("shelve")
531 with acceptableerrors("no shelved changes to apply"):
532 with acceptableerrors("no shelved changes to apply"):
532 self.hg("unshelve")
533 self.hg("unshelve")
533
534
534 class writeonlydatabase(ExampleDatabase):
535 class writeonlydatabase(ExampleDatabase):
535 def __init__(self, underlying):
536 def __init__(self, underlying):
536 super(ExampleDatabase, self).__init__()
537 super(ExampleDatabase, self).__init__()
537 self.underlying = underlying
538 self.underlying = underlying
538
539
539 def fetch(self, key):
540 def fetch(self, key):
540 return ()
541 return ()
541
542
542 def save(self, key, value):
543 def save(self, key, value):
543 self.underlying.save(key, value)
544 self.underlying.save(key, value)
544
545
545 def delete(self, key, value):
546 def delete(self, key, value):
546 self.underlying.delete(key, value)
547 self.underlying.delete(key, value)
547
548
548 def close(self):
549 def close(self):
549 self.underlying.close()
550 self.underlying.close()
550
551
552 def extensionconfigkey(extension):
553 return "extensions." + extension
554
551 settings.register_profile(
555 settings.register_profile(
552 'default', settings(
556 'default', settings(
553 timeout=300,
557 timeout=300,
554 stateful_step_count=50,
558 stateful_step_count=50,
555 max_examples=10,
559 max_examples=10,
556 )
560 )
557 )
561 )
558
562
559 settings.register_profile(
563 settings.register_profile(
560 'fast', settings(
564 'fast', settings(
561 timeout=10,
565 timeout=10,
562 stateful_step_count=20,
566 stateful_step_count=20,
563 max_examples=5,
567 max_examples=5,
564 min_satisfying_examples=1,
568 min_satisfying_examples=1,
565 max_shrinks=0,
569 max_shrinks=0,
566 )
570 )
567 )
571 )
568
572
569 settings.register_profile(
573 settings.register_profile(
570 'continuous', settings(
574 'continuous', settings(
571 timeout=-1,
575 timeout=-1,
572 stateful_step_count=1000,
576 stateful_step_count=1000,
573 max_examples=10 ** 8,
577 max_examples=10 ** 8,
574 max_iterations=10 ** 8,
578 max_iterations=10 ** 8,
575 database=writeonlydatabase(settings.default.database)
579 database=writeonlydatabase(settings.default.database)
576 )
580 )
577 )
581 )
578
582
579 settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))
583 settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))
580
584
581 verifyingtest = verifyingstatemachine.TestCase
585 verifyingtest = verifyingstatemachine.TestCase
582
586
583 verifyingtest.settings = settings.default
587 verifyingtest.settings = settings.default
584
588
585 if __name__ == '__main__':
589 if __name__ == '__main__':
586 try:
590 try:
587 silenttestrunner.main(__name__)
591 silenttestrunner.main(__name__)
588 finally:
592 finally:
589 # So as to prevent proliferation of useless test files, if we never
593 # So as to prevent proliferation of useless test files, if we never
590 # actually wrote a failing test we clean up after ourselves and delete
594 # actually wrote a failing test we clean up after ourselves and delete
591 # the file for doing so that we owned.
595 # the file for doing so that we owned.
592 if os.path.exists(savefile) and os.path.getsize(savefile) == 0:
596 if os.path.exists(savefile) and os.path.getsize(savefile) == 0:
593 os.unlink(savefile)
597 os.unlink(savefile)
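For context, the regenerated per-extension variants produced in teardown() are fed back through the normal test harness; a sketch of that step, mirroring the assertions above (the interpreter invocation and file names are illustrative and assume the usual run-tests.py environment):

    import subprocess

    # Run one regenerated test that has an extension's --config flag removed
    # and check it still passes, as teardown() does for each skippable extension.
    output = subprocess.check_output(
        ["python", "run-tests.py", "test-generated-no-shelve.t", "--local"],
        stderr=subprocess.STDOUT)
    assert "Ran 1 test" in output, output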