##// END OF EJS Templates
tests: make test-verify-repo-operations.py not run by default...
Martin von Zweigbergk -
r28499:8b90367c default
parent child Browse files
Show More
@@ -1,597 +1,603
from __future__ import print_function, absolute_import

"""Fuzz testing for operations against a Mercurial repository

This uses Hypothesis's stateful testing to generate random repository
operations and test Mercurial using them, both to see if there are any
unexpected errors and to compare different versions of it."""

import os
import subprocess
import sys

# Only run if slow tests are allowed.  hghave exits non-zero when the
# requested feature ('slow') is not enabled; exit code 80 is the test
# harness's "skipped" status.
if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'],
                    'slow']):
    sys.exit(80)

# These tests require Hypothesis and pytz to be installed.
# Running 'pip install hypothesis pytz' will achieve that.
# Note: This won't work if you're running Python < 2.7.
try:
    from hypothesis.extra.datetime import datetimes
except ImportError:
    sys.stderr.write("skipped: hypothesis or pytz not installed" + os.linesep)
    sys.exit(80)

# If you are running an old version of pip you may find that the enum34
# backport is not installed automatically. If so 'pip install enum34' will
# fix this problem.
try:
    import enum
    assert enum # Silence pyflakes
except ImportError:
    sys.stderr.write("skipped: enum34 not installed" + os.linesep)
    sys.exit(80)

import binascii
from contextlib import contextmanager
import errno
import pipes
import shutil
import silenttestrunner
import subprocess

from hypothesis.errors import HypothesisException
from hypothesis.stateful import (
    rule, RuleBasedStateMachine, Bundle, precondition)
from hypothesis import settings, note, strategies as st
from hypothesis.configuration import set_hypothesis_home_dir
from hypothesis.database import ExampleDatabase

testdir = os.path.abspath(os.environ["TESTDIR"])

# We store Hypothesis examples here rather in the temporary test directory
# so that when rerunning a failing test this always results in refinding the
# previous failure. This directory is in .hgignore and should not be checked in
# but is useful to have for development.
set_hypothesis_home_dir(os.path.join(testdir, ".hypothesis"))

runtests = os.path.join(os.environ["RUNTESTDIR"], "run-tests.py")
testtmp = os.environ["TESTTMP"]
assert os.path.isdir(testtmp)

generatedtests = os.path.join(testdir, "hypothesis-generated")

# Create the output directory for generated .t tests if it does not already
# exist; EEXIST (and other OSErrors) are deliberately ignored here.
try:
    os.makedirs(generatedtests)
except OSError:
    pass

# We write out generated .t files to a file in order to ease debugging and to
# give a starting point for turning failures Hypothesis finds into normal
# tests. In order to ensure that multiple copies of this test can be run in
# parallel we use atomic file create to ensure that we always get a unique
# name.
file_index = 0
while True:
    file_index += 1
    savefile = os.path.join(generatedtests, "test-generated-%d.t" % (
        file_index,
    ))
    try:
        # O_CREAT | O_EXCL makes creation atomic: it fails with EEXIST if
        # another process already claimed this name.
        os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY))
        break
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
assert os.path.exists(savefile)

# Relative path of a repository's config file (used from within a repo).
hgrc = os.path.join(".hg", "hgrc")

# Characters considered safe for generated file names.
filecharacters = (
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    "[]^_`;=@{}~ !#$%&'()+,-"
)

# Strategy for non-empty, stripped, ASCII-encodable file name components.
files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter(
    bool).map(lambda s: s.encode('ascii'))

# Strategy for printable-ish text (no control or surrogate characters),
# encoded as UTF-8 bytes; used for commit messages and branch names.
safetext = st.text(st.characters(
    min_codepoint=1, max_codepoint=127,
    blacklist_categories=('Cc', 'Cs')), min_size=1).map(
    lambda s: s.encode('utf-8')
)

# The set of extensions the state machine may enable and exercise.
extensions = st.sampled_from(('shelve', 'mq', 'blackbox',))
101
107
@contextmanager
def acceptableerrors(*args):
    """Context manager that swallows expected Mercurial command failures.

    Some operations we perform are known to fail in acceptable ways. Each
    positional argument is a substring of the error output Mercurial may
    emit; a CalledProcessError whose output contains any of them is
    suppressed. Any other failure is recorded via note() and re-raised."""
    try:
        yield
    except subprocess.CalledProcessError as err:
        for fragment in args:
            if fragment in err.output:
                break
        else:
            note(err.output)
            raise
114
120
# Strategy for repository names: non-empty lowercase alphanumeric, encoded
# as ASCII bytes.
# NOTE(review): '5' appears twice in the alphabet literal ("...4556789");
# harmless (it only skews character frequency) but probably a typo -- kept
# as-is to preserve behavior.
reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map(
    lambda s: s.encode('ascii')
)
118
124
class verifyingstatemachine(RuleBasedStateMachine):
    """This defines the set of acceptable operations on a Mercurial repository
    using Hypothesis's RuleBasedStateMachine.

    The general concept is that we manage multiple repositories inside a
    repos/ directory in our temporary test location. Some of these are freshly
    inited, some are clones of the others. Our current working directory is
    always inside one of these repositories while the tests are running.

    Hypothesis then performs a series of operations against these repositories,
    including hg commands, generating contents and editing the .hgrc file.
    If these operations fail in unexpected ways or behave differently in
    different configurations of Mercurial, the test will fail and a minimized
    .t test file will be written to the hypothesis-generated directory to
    exhibit that failure.

    Operations are defined as methods with @rule() decorators. See the
    Hypothesis documentation at
    http://hypothesis.readthedocs.org/en/release/stateful.html for more
    details."""

    # A bundle is a reusable collection of previously generated data which may
    # be provided as arguments to future operations.
    repos = Bundle('repos')
    paths = Bundle('paths')
    contents = Bundle('contents')
    branches = Bundle('branches')
    committimes = Bundle('committimes')

    def __init__(self):
        super(verifyingstatemachine, self).__init__()
        # Start from a clean slate: remove repositories left over from any
        # previous example, then create and enter a fresh repo1.
        self.repodir = os.path.join(testtmp, "repos")
        if os.path.exists(self.repodir):
            shutil.rmtree(self.repodir)
        os.chdir(testtmp)
        # Shell-style transcript of every operation; replayed as a .t test
        # in teardown().
        self.log = []
        self.failed = False
        # Per-repository dict of --config overrides applied to hg commands.
        self.configperrepo = {}
        self.all_extensions = set()
        self.non_skippable_extensions = set()

        self.mkdirp("repos")
        self.cd("repos")
        self.mkdirp("repo1")
        self.cd("repo1")
        self.hg("init")

    def teardown(self):
        """On teardown we clean up after ourselves as usual, but we also
        do some additional testing: We generate a .t file based on our test
        run using run-test.py -i to get the correct output.

        We then test it in a number of other configurations, verifying that
        each passes the same test."""
        super(verifyingstatemachine, self).teardown()
        try:
            shutil.rmtree(self.repodir)
        except OSError:
            pass
        # .t test format indents commands/output by two spaces.
        ttest = os.linesep.join("  " + l for l in self.log)
        os.chdir(testtmp)
        path = os.path.join(testtmp, "test-generated.t")
        with open(path, 'w') as o:
            o.write(ttest + os.linesep)
        with open(os.devnull, "w") as devnull:
            # Run the generated test interactively (-i) and answer "yes" so
            # run-tests.py rewrites the file with the actual command output.
            rewriter = subprocess.Popen(
                [runtests, "--local", "-i", path], stdin=subprocess.PIPE,
                stdout=devnull, stderr=devnull,
            )
            rewriter.communicate("yes")
            with open(path, 'r') as i:
                ttest = i.read()

        e = None
        if not self.failed:
            try:
                # Re-run the accepted test under the pure-Python build and
                # check it still passes identically.
                output = subprocess.check_output([
                    runtests, path, "--local", "--pure"
                ], stderr=subprocess.STDOUT)
                assert "Ran 1 test" in output, output
                # For every extension that was enabled but whose commands
                # were never actually used, verify the test also passes with
                # that extension stripped out.
                for ext in (
                    self.all_extensions - self.non_skippable_extensions
                ):
                    tf = os.path.join(testtmp, "test-generated-no-%s.t" % (
                        ext,
                    ))
                    with open(tf, 'w') as o:
                        for l in ttest.splitlines():
                            if l.startswith("  $ hg"):
                                l = l.replace(
                                    "--config %s=" % (
                                        extensionconfigkey(ext),), "")
                            o.write(l + os.linesep)
                    with open(tf, 'r') as r:
                        t = r.read()
                        assert ext not in t, t
                    output = subprocess.check_output([
                        runtests, tf, "--local",
                    ], stderr=subprocess.STDOUT)
                    assert "Ran 1 test" in output, output
            except subprocess.CalledProcessError as e:
                note(e.output)
        # Preserve the generated test for debugging whenever anything went
        # wrong, then propagate the failure.
        if self.failed or e is not None:
            with open(savefile, "wb") as o:
                o.write(ttest)
        if e is not None:
            raise e

    def execute_step(self, step):
        # Record that an unexpected exception occurred so teardown() saves
        # the generated .t file; Hypothesis-internal exceptions and ^C are
        # passed through untouched.
        try:
            return super(verifyingstatemachine, self).execute_step(step)
        except (HypothesisException, KeyboardInterrupt):
            raise
        except Exception:
            self.failed = True
            raise

    # Section: Basic commands.
    def mkdirp(self, path):
        """Create *path* (and parents) if needed, logging the equivalent
        shell command."""
        if os.path.exists(path):
            return
        self.log.append(
            "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),))
        os.makedirs(path)

    def cd(self, path):
        """Change directory to *path*, logging the equivalent shell command.
        A no-op when already there."""
        path = os.path.relpath(path)
        if path == ".":
            return
        os.chdir(path)
        self.log.append("$ cd -- %s" % (pipes.quote(path),))

    def hg(self, *args):
        """Run an hg command with the current repo's --config overrides
        prepended."""
        extra_flags = []
        for key, value in self.config.items():
            extra_flags.append("--config")
            extra_flags.append("%s=%s" % (key, value))
        self.command("hg", *(tuple(extra_flags) + args))

    def command(self, *args):
        """Log and run an arbitrary command; raises CalledProcessError on a
        non-zero exit (stderr is folded into the captured output)."""
        self.log.append("$ " + ' '.join(map(pipes.quote, args)))
        subprocess.check_output(args, stderr=subprocess.STDOUT)

    # Section: Set up basic data
    # This section has no side effects but generates data that we will want
    # to use later.
    @rule(
        target=paths,
        source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l)))
    def genpath(self, source):
        return source

    @rule(
        target=committimes,
        when=datetimes(min_year=1970, max_year=2038) | st.none())
    def gentime(self, when):
        return when

    @rule(
        target=contents,
        content=st.one_of(
            st.binary(),
            st.text().map(lambda x: x.encode('utf-8'))
        ))
    def gencontent(self, content):
        return content

    @rule(
        target=branches,
        name=safetext,
    )
    def genbranch(self, name):
        return name

    @rule(target=paths, source=paths)
    def lowerpath(self, source):
        # Case variants exercise case-(in)sensitive filesystem handling.
        return source.lower()

    @rule(target=paths, source=paths)
    def upperpath(self, source):
        return source.upper()

    # Section: Basic path operations
    @rule(path=paths, content=contents)
    def writecontent(self, path, content):
        self.unadded_changes = True
        if os.path.isdir(path):
            return
        parent = os.path.dirname(path)
        if parent:
            try:
                self.mkdirp(parent)
            except OSError:
                # It may be the case that there is a regular file that has
                # previously been created that has the same name as an ancestor
                # of the current path. This will cause mkdirp to fail with this
                # error. We just turn this into a no-op in that case.
                return
        with open(path, 'wb') as o:
            o.write(content)
        # Log a shell-replayable equivalent that reproduces arbitrary bytes
        # via hex encoding.
        self.log.append((
            "$ python -c 'import binascii; "
            "print(binascii.unhexlify(\"%s\"))' > %s") % (
                binascii.hexlify(content),
                pipes.quote(path),
            ))

    @rule(path=paths)
    def addpath(self, path):
        if os.path.exists(path):
            self.hg("add", "--", path)

    @rule(path=paths)
    def forgetpath(self, path):
        if os.path.exists(path):
            with acceptableerrors(
                "file is already untracked",
            ):
                self.hg("forget", "--", path)

    @rule(s=st.none() | st.integers(0, 100))
    def addremove(self, s):
        # Optional -s is the similarity percentage for rename detection.
        args = ["addremove"]
        if s is not None:
            args.extend(["-s", str(s)])
        self.hg(*args)

    @rule(path=paths)
    def removepath(self, path):
        if os.path.exists(path):
            with acceptableerrors(
                'file is untracked',
                'file has been marked for add',
                'file is modified',
            ):
                self.hg("remove", "--", path)

    @rule(
        message=safetext,
        amend=st.booleans(),
        when=committimes,
        addremove=st.booleans(),
        secret=st.booleans(),
        close_branch=st.booleans(),
    )
    def maybecommit(
        self, message, amend, when, addremove, secret, close_branch
    ):
        """Attempt a commit with a random combination of flags, accepting
        the error messages each flag is known to be able to produce."""
        command = ["commit"]
        errors = ["nothing changed"]
        if amend:
            errors.append("cannot amend public changesets")
            command.append("--amend")
        command.append("-m" + pipes.quote(message))
        if secret:
            command.append("--secret")
        if close_branch:
            command.append("--close-branch")
            errors.append("can only close branch heads")
        if addremove:
            command.append("--addremove")
        if when is not None:
            # Timestamps at the edges of the generated range can fall
            # outside what Mercurial accepts (epoch underflow / 32-bit
            # overflow), so treat those errors as acceptable.
            if when.year == 1970:
                errors.append('negative date value')
            if when.year == 2038:
                errors.append('exceeds 32 bits')
            command.append("--date=%s" % (
                when.strftime('%Y-%m-%d %H:%M:%S %z'),))

        with acceptableerrors(*errors):
            self.hg(*command)

    # Section: Repository management
    @property
    def currentrepo(self):
        # Repositories live directly under repos/, so the cwd basename is
        # the repo name.
        return os.path.basename(os.getcwd())

    @property
    def config(self):
        """--config overrides for the repository we are currently inside."""
        return self.configperrepo.setdefault(self.currentrepo, {})

    @rule(
        target=repos,
        source=repos,
        name=reponames,
    )
    def clone(self, source, name):
        if not os.path.exists(os.path.join("..", name)):
            self.cd("..")
            self.hg("clone", source, name)
            self.cd(name)
        return name

    @rule(
        target=repos,
        name=reponames,
    )
    def fresh(self, name):
        if not os.path.exists(os.path.join("..", name)):
            self.cd("..")
            self.mkdirp(name)
            self.cd(name)
            self.hg("init")
        return name

    @rule(name=repos)
    def switch(self, name):
        self.cd(os.path.join("..", name))
        assert self.currentrepo == name
        assert os.path.exists(".hg")

    @rule(target=repos)
    def origin(self):
        # repo1 is created in __init__ and always exists.
        return "repo1"

    @rule()
    def pull(self, repo=repos):
        # NOTE(review): the repo=repos default is not drawn by @rule() and
        # the argument is unused -- confirm whether a rule(repo=repos) was
        # intended.
        with acceptableerrors(
            "repository default not found",
            "repository is unrelated",
        ):
            self.hg("pull")

    @rule(newbranch=st.booleans())
    def push(self, newbranch):
        with acceptableerrors(
            "default repository not configured",
            "no changes found",
        ):
            if newbranch:
                self.hg("push", "--new-branch")
            else:
                with acceptableerrors(
                    "creates new branches"
                ):
                    self.hg("push")

    # Section: Simple side effect free "check" operations
    @rule()
    def log(self):
        self.hg("log")

    @rule()
    def verify(self):
        self.hg("verify")

    @rule()
    def diff(self):
        # --nodates keeps the output stable across runs.
        self.hg("diff", "--nodates")

    @rule()
    def status(self):
        self.hg("status")

    @rule()
    def export(self):
        self.hg("export")

    # Section: Branch management
    @rule()
    def checkbranch(self):
        self.hg("branch")

    @rule(branch=branches)
    def switchbranch(self, branch):
        with acceptableerrors(
            'cannot use an integer as a name',
            'cannot be used in a name',
            'a branch of the same name already exists',
            'is reserved',
        ):
            self.hg("branch", "--", branch)

    @rule(branch=branches, clean=st.booleans())
    def update(self, branch, clean):
        with acceptableerrors(
            'unknown revision',
            'parse error',
        ):
            if clean:
                self.hg("update", "-C", "--", branch)
            else:
                self.hg("update", "--", branch)

    # Section: Extension management
    def hasextension(self, extension):
        """True when *extension* is enabled in the current repo's config."""
        return extensionconfigkey(extension) in self.config

    def commandused(self, extension):
        """Mark *extension* as actually exercised, so teardown() does not
        try to replay the test with it removed."""
        assert extension in self.all_extensions
        self.non_skippable_extensions.add(extension)

    @rule(extension=extensions)
    def addextension(self, extension):
        self.all_extensions.add(extension)
        self.config[extensionconfigkey(extension)] = ""

    @rule(extension=extensions)
    def removeextension(self, extension):
        self.config.pop(extensionconfigkey(extension), None)

    # Section: Commands from the shelve extension
    @rule()
    @precondition(lambda self: self.hasextension("shelve"))
    def shelve(self):
        self.commandused("shelve")
        with acceptableerrors("nothing changed"):
            self.hg("shelve")

    @rule()
    @precondition(lambda self: self.hasextension("shelve"))
    def unshelve(self):
        self.commandused("shelve")
        with acceptableerrors("no shelved changes to apply"):
            self.hg("unshelve")
534
540
class writeonlydatabase(ExampleDatabase):
    """An example database wrapper that saves examples but never replays
    them.

    Used by the 'continuous' profile below: failing examples are still
    persisted to the underlying database for later runs, but fetch()
    always reports the database as empty so continuous fuzzing keeps
    exploring new examples instead of re-trying stored ones."""

    def __init__(self, underlying):
        # NOTE(review): this calls super() with ExampleDatabase as the
        # target, which skips ExampleDatabase.__init__ itself -- confirm
        # this is deliberate rather than a typo for
        # super(writeonlydatabase, self).
        super(ExampleDatabase, self).__init__()
        self.underlying = underlying

    def fetch(self, key):
        # Pretend nothing is stored so examples are never replayed.
        return ()

    def save(self, key, value):
        self.underlying.save(key, value)

    def delete(self, key, value):
        self.underlying.delete(key, value)

    def close(self):
        self.underlying.close()
551
557
def extensionconfigkey(extension):
    """Return the hgrc configuration key that enables *extension*."""
    return "extensions.%s" % (extension,)
554
560
# Hypothesis profiles, selected via the HYPOTHESIS_PROFILE environment
# variable below.

# 'default': a moderately thorough run suitable for the normal test suite.
settings.register_profile(
    'default', settings(
        timeout=300,
        stateful_step_count=50,
        max_examples=10,
    )
)

# 'fast': a shallow run with shrinking disabled, for quick smoke testing.
settings.register_profile(
    'fast', settings(
        timeout=10,
        stateful_step_count=20,
        max_examples=5,
        min_satisfying_examples=1,
        max_shrinks=0,
    )
)

# 'continuous': effectively unbounded fuzzing; failures are saved to the
# example database but never replayed (see writeonlydatabase).
settings.register_profile(
    'continuous', settings(
        timeout=-1,
        stateful_step_count=1000,
        max_examples=10 ** 8,
        max_iterations=10 ** 8,
        database=writeonlydatabase(settings.default.database)
    )
)

settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default'))

# Expose the state machine as a unittest TestCase for the runner.
verifyingtest = verifyingstatemachine.TestCase

verifyingtest.settings = settings.default

if __name__ == '__main__':
    try:
        silenttestrunner.main(__name__)
    finally:
        # So as to prevent proliferation of useless test files, if we never
        # actually wrote a failing test we clean up after ourselves and delete
        # the file for doing so that we owned.
        if os.path.exists(savefile) and os.path.getsize(savefile) == 0:
            os.unlink(savefile)
General Comments 0
You need to be logged in to leave comments. Login now