testing: allow Hypothesis to enable extensions...
David R. MacIver
r28258:fc7ee50a default
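For readers who have not used Hypothesis's stateful testing, the patch builds on the pattern sketched below: @rule() declares an operation the engine may generate, @precondition() gates a rule on state accumulated by earlier steps, and the machine's TestCase attribute exposes it to unittest. This is a minimal illustrative sketch only; the class name and rule bodies are simplified stand-ins and are not part of the patch.

from hypothesis.stateful import RuleBasedStateMachine, rule, precondition

class extensiondemo(RuleBasedStateMachine):
    """Illustrative only: mirrors how the patch gates shelve()/unshelve()
    on addextension() having run first."""

    def __init__(self):
        super(extensiondemo, self).__init__()
        self.enabled = set()

    @rule()
    def addextension(self):
        # Stands in for the real addextension() rule, which edits .hg/hgrc.
        self.enabled.add('shelve')

    @rule()
    @precondition(lambda self: 'shelve' in self.enabled)
    def shelve(self):
        # Hypothesis only generates this step once the precondition holds.
        assert 'shelve' in self.enabled

extensiondemotest = extensiondemo.TestCase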
@@ -1,506 +1,565 @@
 from __future__ import print_function, absolute_import

 """Fuzz testing for operations against a Mercurial repository

 This uses Hypothesis's stateful testing to generate random repository
 operations and test Mercurial using them, both to see if there are any
 unexpected errors and to compare different versions of it."""

 import os
 import sys

 # These tests require Hypothesis and pytz to be installed.
 # Running 'pip install hypothesis pytz' will achieve that.
 # Note: This won't work if you're running Python < 2.7.
 try:
     from hypothesis.extra.datetime import datetimes
 except ImportError:
     sys.stderr.write("skipped: hypothesis or pytz not installed" + os.linesep)
     sys.exit(80)

 # If you are running an old version of pip you may find that the enum34
 # backport is not installed automatically. If so 'pip install enum34' will
 # fix this problem.
 try:
     import enum
     assert enum # Silence pyflakes
 except ImportError:
     sys.stderr.write("skipped: enum34 not installed" + os.linesep)
     sys.exit(80)

 import binascii
 from contextlib import contextmanager
 import errno
 import pipes
 import shutil
 import silenttestrunner
 import subprocess

 from hypothesis.errors import HypothesisException
-from hypothesis.stateful import rule, RuleBasedStateMachine, Bundle
+from hypothesis.stateful import (
+    rule, RuleBasedStateMachine, Bundle, precondition)
 from hypothesis import settings, note, strategies as st
 from hypothesis.configuration import set_hypothesis_home_dir

 testdir = os.path.abspath(os.environ["TESTDIR"])

 # We store Hypothesis examples here rather than in the temporary test directory
 # so that when rerunning a failing test this always results in refinding the
 # previous failure. This directory is in .hgignore and should not be checked in
 # but is useful to have for development.
 set_hypothesis_home_dir(os.path.join(testdir, ".hypothesis"))

 runtests = os.path.join(os.environ["RUNTESTDIR"], "run-tests.py")
 testtmp = os.environ["TESTTMP"]
 assert os.path.isdir(testtmp)

 generatedtests = os.path.join(testdir, "hypothesis-generated")

 try:
     os.makedirs(generatedtests)
 except OSError:
     pass

 # We write out generated .t files to a file in order to ease debugging and to
 # give a starting point for turning failures Hypothesis finds into normal
 # tests. In order to ensure that multiple copies of this test can be run in
 # parallel we use atomic file create to ensure that we always get a unique
 # name.
 file_index = 0
 while True:
     file_index += 1
     savefile = os.path.join(generatedtests, "test-generated-%d.t" % (
         file_index,
     ))
     try:
         os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
         break
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise
 assert os.path.exists(savefile)

 hgrc = os.path.join(".hg", "hgrc")

 filecharacters = (
     "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
     "[]^_`;=@{}~ !#$%&'()+,-"
 )

 files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter(
     bool).map(lambda s: s.encode('ascii'))

 safetext = st.text(st.characters(
     min_codepoint=1, max_codepoint=127,
     blacklist_categories=('Cc', 'Cs')), min_size=1).map(
     lambda s: s.encode('utf-8')
 )

 @contextmanager
 def acceptableerrors(*args):
     """Sometimes we know an operation we're about to perform might fail, and
     we're OK with some of the failures. In those cases this may be used as a
     context manager and will swallow expected failures, as identified by
     substrings of the error message Mercurial emits."""
     try:
         yield
     except subprocess.CalledProcessError as e:
         if not any(a in e.output for a in args):
             note(e.output)
             raise

 reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map(
     lambda s: s.encode('ascii')
 )

 class verifyingstatemachine(RuleBasedStateMachine):
     """This defines the set of acceptable operations on a Mercurial repository
     using Hypothesis's RuleBasedStateMachine.

     The general concept is that we manage multiple repositories inside a
     repos/ directory in our temporary test location. Some of these are freshly
     inited, some are clones of the others. Our current working directory is
     always inside one of these repositories while the tests are running.

     Hypothesis then performs a series of operations against these repositories,
     including hg commands, generating contents and editing the .hgrc file.
     If these operations fail in unexpected ways or behave differently in
     different configurations of Mercurial, the test will fail and a minimized
     .t test file will be written to the hypothesis-generated directory to
     exhibit that failure.

     Operations are defined as methods with @rule() decorators. See the
     Hypothesis documentation at
     http://hypothesis.readthedocs.org/en/release/stateful.html for more
     details."""

     # A bundle is a reusable collection of previously generated data which may
     # be provided as arguments to future operations.
     repos = Bundle('repos')
     paths = Bundle('paths')
     contents = Bundle('contents')
     branches = Bundle('branches')
     committimes = Bundle('committimes')

     def __init__(self):
         super(verifyingstatemachine, self).__init__()
         self.repodir = os.path.join(testtmp, "repos")
         if os.path.exists(self.repodir):
             shutil.rmtree(self.repodir)
         os.chdir(testtmp)
         self.log = []
         self.failed = False

         self.mkdirp("repos")
         self.cd("repos")
         self.mkdirp("repo1")
         self.cd("repo1")
         self.hg("init")
+        self.extensions = {}
+        self.all_extensions = set()
+        self.non_skippable_extensions = set()

     def teardown(self):
         """On teardown we clean up after ourselves as usual, but we also
         do some additional testing: We generate a .t file based on our test
         run using run-test.py -i to get the correct output.

         We then test it in a number of other configurations, verifying that
         each passes the same test."""
         super(verifyingstatemachine, self).teardown()
         try:
             shutil.rmtree(self.repodir)
         except OSError:
             pass
         ttest = os.linesep.join("  " + l for l in self.log)
         os.chdir(testtmp)
         path = os.path.join(testtmp, "test-generated.t")
         with open(path, 'w') as o:
             o.write(ttest + os.linesep)
         with open(os.devnull, "w") as devnull:
             rewriter = subprocess.Popen(
                 [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
                 stdout=devnull, stderr=devnull,
             )
             rewriter.communicate("yes")
             with open(path, 'r') as i:
                 ttest = i.read()

         e = None
         if not self.failed:
             try:
+                for ext in (
+                    self.all_extensions - self.non_skippable_extensions
+                ):
+                    try:
+                        os.environ["SKIP_EXTENSION"] = ext
+                        output = subprocess.check_output([
+                            runtests, path, "--local",
+                        ], stderr=subprocess.STDOUT)
+                        assert "Ran 1 test" in output, output
+                    finally:
+                        del os.environ["SKIP_EXTENSION"]
                 output = subprocess.check_output([
                     runtests, path, "--local", "--pure"
                 ], stderr=subprocess.STDOUT)
                 assert "Ran 1 test" in output, output
             except subprocess.CalledProcessError as e:
                 note(e.output)
             finally:
                 os.unlink(path)
                 try:
                     os.unlink(path + ".err")
                 except OSError:
                     pass
         if self.failed or e is not None:
             with open(savefile, "wb") as o:
                 o.write(ttest)
         if e is not None:
             raise e

     def execute_step(self, step):
         try:
             return super(verifyingstatemachine, self).execute_step(step)
         except (HypothesisException, KeyboardInterrupt):
             raise
         except Exception:
             self.failed = True
             raise

     # Section: Basic commands.
     def mkdirp(self, path):
         if os.path.exists(path):
             return
         self.log.append(
             "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),))
         os.makedirs(path)

     def cd(self, path):
         path = os.path.relpath(path)
         if path == ".":
             return
         os.chdir(path)
         self.log.append("$ cd -- %s" % (pipes.quote(path),))

     def hg(self, *args):
         self.command("hg", *args)

     def command(self, *args):
         self.log.append("$ " + ' '.join(map(pipes.quote, args)))
         subprocess.check_output(args, stderr=subprocess.STDOUT)

     # Section: Set up basic data
     # This section has no side effects but generates data that we will want
     # to use later.
     @rule(
         target=paths,
         source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
     def genpath(self, source):
         return source

     @rule(
         target=committimes,
         when=datetimes(min_year=1970, max_year=2038) | st.none())
     def gentime(self, when):
         return when

     @rule(
         target=contents,
         content=st.one_of(
             st.binary(),
             st.text().map(lambda x: x.encode('utf-8'))
         ))
     def gencontent(self, content):
         return content

     @rule(
         target=branches,
         name=safetext,
     )
     def genbranch(self, name):
         return name

     @rule(target=paths, source=paths)
     def lowerpath(self, source):
         return source.lower()

     @rule(target=paths, source=paths)
     def upperpath(self, source):
         return source.upper()

     # Section: Basic path operations
     @rule(path=paths, content=contents)
     def writecontent(self, path, content):
         self.unadded_changes = True
         if os.path.isdir(path):
             return
         parent = os.path.dirname(path)
         if parent:
             try:
                 self.mkdirp(parent)
             except OSError:
                 # It may be the case that there is a regular file that has
                 # previously been created that has the same name as an ancestor
                 # of the current path. This will cause mkdirp to fail with this
                 # error. We just turn this into a no-op in that case.
                 return
         with open(path, 'wb') as o:
             o.write(content)
         self.log.append((
             "$ python -c 'import binascii; "
             "print(binascii.unhexlify(\"%s\"))' > %s") % (
                 binascii.hexlify(content),
                 pipes.quote(path),
             ))

     @rule(path=paths)
     def addpath(self, path):
         if os.path.exists(path):
             self.hg("add", "--", path)

     @rule(path=paths)
     def forgetpath(self, path):
         if os.path.exists(path):
             with acceptableerrors(
                 "file is already untracked",
             ):
                 self.hg("forget", "--", path)

     @rule(s=st.none() | st.integers(0, 100))
     def addremove(self, s):
         args = ["addremove"]
         if s is not None:
             args.extend(["-s", str(s)])
         self.hg(*args)

     @rule(path=paths)
     def removepath(self, path):
         if os.path.exists(path):
             with acceptableerrors(
                 'file is untracked',
                 'file has been marked for add',
                 'file is modified',
             ):
                 self.hg("remove", "--", path)

     @rule(
         message=safetext,
         amend=st.booleans(),
         when=committimes,
         addremove=st.booleans(),
         secret=st.booleans(),
         close_branch=st.booleans(),
     )
     def maybecommit(
         self, message, amend, when, addremove, secret, close_branch
     ):
         command = ["commit"]
         errors = ["nothing changed"]
         if amend:
             errors.append("cannot amend public changesets")
             command.append("--amend")
         command.append("-m" + pipes.quote(message))
         if secret:
             command.append("--secret")
         if close_branch:
             command.append("--close-branch")
             errors.append("can only close branch heads")
         if addremove:
             command.append("--addremove")
         if when is not None:
             if when.year == 1970:
                 errors.append('negative date value')
             if when.year == 2038:
                 errors.append('exceeds 32 bits')
             command.append("--date=%s" % (
                 when.strftime('%Y-%m-%d %H:%M:%S %z'),))

         with acceptableerrors(*errors):
             self.hg(*command)

     # Section: Repository management
     @property
     def currentrepo(self):
         return os.path.basename(os.getcwd())

     @rule(
         target=repos,
         source=repos,
         name=reponames,
     )
     def clone(self, source, name):
         if not os.path.exists(os.path.join("..", name)):
             self.cd("..")
             self.hg("clone", source, name)
             self.cd(name)
         return name

     @rule(
         target=repos,
         name=reponames,
     )
     def fresh(self, name):
         if not os.path.exists(os.path.join("..", name)):
             self.cd("..")
             self.mkdirp(name)
             self.cd(name)
             self.hg("init")
         return name

     @rule(name=repos)
     def switch(self, name):
         self.cd(os.path.join("..", name))
         assert self.currentrepo == name
         assert os.path.exists(".hg")

     @rule(target=repos)
     def origin(self):
         return "repo1"

     @rule()
     def pull(self, repo=repos):
         with acceptableerrors(
             "repository default not found",
             "repository is unrelated",
         ):
             self.hg("pull")

     @rule(newbranch=st.booleans())
     def push(self, newbranch):
         with acceptableerrors(
             "default repository not configured",
             "no changes found",
         ):
             if newbranch:
                 self.hg("push", "--new-branch")
             else:
                 with acceptableerrors(
                     "creates new branches"
                 ):
                     self.hg("push")

     # Section: Simple side effect free "check" operations
     @rule()
     def log(self):
         self.hg("log")

     @rule()
     def verify(self):
         self.hg("verify")

     @rule()
     def diff(self):
         self.hg("diff", "--nodates")

     @rule()
     def status(self):
         self.hg("status")

     @rule()
     def export(self):
         self.hg("export")

     # Section: Branch management
     @rule()
     def checkbranch(self):
         self.hg("branch")

     @rule(branch=branches)
     def switchbranch(self, branch):
         with acceptableerrors(
             'cannot use an integer as a name',
             'cannot be used in a name',
             'a branch of the same name already exists',
             'is reserved',
         ):
             self.hg("branch", "--", branch)

     @rule(branch=branches, clean=st.booleans())
     def update(self, branch, clean):
         with acceptableerrors(
             'unknown revision',
             'parse error',
         ):
             if clean:
                 self.hg("update", "-C", "--", branch)
             else:
                 self.hg("update", "--", branch)

+    # Section: Extension management
+    def hasextension(self, extension):
+        repo = self.currentrepo
+        return repo in self.extensions and extension in self.extensions[repo]
+
+    def commandused(self, extension):
+        assert extension in self.all_extensions
+        self.non_skippable_extensions.add(extension)
+
+    @rule(extension=st.sampled_from((
+        'shelve', 'mq', 'blackbox',
+    )))
+    def addextension(self, extension):
+        self.all_extensions.add(extension)
+        extensions = self.extensions.setdefault(self.currentrepo, set())
+        if extension in extensions:
+            return
+        extensions.add(extension)
+        if not os.path.exists(hgrc):
+            self.command("touch", hgrc)
+        with open(hgrc, 'a') as o:
+            line = "[extensions]\n%s=\n" % (extension,)
+            o.write(line)
+        for l in line.splitlines():
+            self.log.append((
+                '$ if test "$SKIP_EXTENSION" != "%s" ; '
+                'then echo %r >> %s; fi') % (
+                    extension, l, hgrc,))
+
+    # Section: Commands from the shelve extension
+    @rule()
+    @precondition(lambda self: self.hasextension("shelve"))
+    def shelve(self):
+        self.commandused("shelve")
+        with acceptableerrors("nothing changed"):
+            self.hg("shelve")
+
+    @rule()
+    @precondition(lambda self: self.hasextension("shelve"))
+    def unshelve(self):
+        self.commandused("shelve")
+        with acceptableerrors("no shelved changes to apply"):
+            self.hg("unshelve")
+
 settings.register_profile(
     'default', settings(
         timeout=300,
         stateful_step_count=50,
         max_examples=10,
     )
 )

 settings.register_profile(
     'fast', settings(
         timeout=10,
         stateful_step_count=20,
         max_examples=5,
         min_satisfying_examples=1,
         max_shrinks=0,
     )
 )

 settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))

 verifyingtest = verifyingstatemachine.TestCase

 verifyingtest.settings = settings.default

 if __name__ == '__main__':
     try:
         silenttestrunner.main(__name__)
     finally:
         # So as to prevent proliferation of useless test files, if we never
         # actually wrote a failing test we clean up after ourselves and delete
         # the file we created for doing so.
         if os.path.exists(savefile) and os.path.getsize(savefile) == 0:
             os.unlink(savefile)
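The generated .t lines produced by addextension() are what let teardown() re-run a passing test with an extension disabled: each hgrc edit is wrapped in a shell test on the SKIP_EXTENSION environment variable, and teardown() sets that variable for every extension that was enabled but whose commands were never exercised. A small standalone sketch of that formatting follows (illustrative only; it hardcodes the extension name 'shelve' and a POSIX-style ".hg/hgrc" path rather than using os.path.join as the patch does).

# Illustrative only: reproduces the log-line formatting used by addextension().
extension = "shelve"
hgrc = ".hg/hgrc"
line = "[extensions]\n%s=\n" % (extension,)
for l in line.splitlines():
    print(('$ if test "$SKIP_EXTENSION" != "%s" ; '
           'then echo %r >> %s; fi') % (extension, l, hgrc))
# Prints:
#   $ if test "$SKIP_EXTENSION" != "shelve" ; then echo '[extensions]' >> .hg/hgrc; fi
#   $ if test "$SKIP_EXTENSION" != "shelve" ; then echo 'shelve=' >> .hg/hgrc; fi
# so the generated test only enables the extension when SKIP_EXTENSION is not
# set to its name, and teardown() can check that the run still passes when the
# extension is skipped.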