merge with stable
Martin von Zweigbergk
r44960:a08bbdf8 merge default
@@ -0,0 +1,91 @@
+====================================
+Testing head checking code: Case E-1
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category E: cases involving changesets on multiple branches
+TestCase 8: moving a branch to another location
+
+.. old-state:
+..
+.. * 1-changeset on branch default
+.. * 1-changeset on branch Z (above default)
+..
+.. new-state:
+..
+.. * 1-changeset on branch default
+.. * 1-changeset on branch Z (rebased away from A0)
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+.. B ø⇠◔ B'
+.. | |
+.. A ◔ |
+.. |/
+.. ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir E1
+  $ cd E1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg branch Z
+  marked working directory as branch Z
+  (branches are permanent and global, did you want a bookmark?)
+  $ mkcommit B0
+  $ hg push --new-branch
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files
+  $ hg up 0
+  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ hg branch --force Z
+  marked working directory as branch Z
+  $ mkcommit B1
+  created new head
+  $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"`
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  $ hg log -G --hidden
+  @  c98b855401e7 (draft): B1
+  |
+  | x  93e5c1321ece (draft): B0
+  | |
+  | o  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+
+
+Actual testing
+--------------
+
+  $ hg push
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 1 changesets with 1 changes to 1 files (+1 heads)
+  1 new obsolescence markers
+  obsoleted 1 changesets
+
+  $ cd ../..
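
Why this push is allowed: the only head of branch Z known to the server (B0) is rewritten as B1, and the obsolescence marker travels with the pushed changesets, so the server's Z head is replaced rather than added. The following is a toy model of that decision, not Mercurial's actual API; the node names and the successors mapping are illustrative only.

    # Toy model: an old head counts as "replaced" when following successor
    # markers from it reaches something the push will put on the server.
    def is_replaced(old_head, successors, pushed):
        stack, seen = [old_head], {old_head}
        while stack:
            node = stack.pop()
            if node in pushed:
                return True
            for succ in successors.get(node, ()):
                if succ not in seen:
                    seen.add(succ)
                    stack.append(succ)
        return False

    # Case E-1: B0 is rewritten as B1, and B1 is part of the push.
    assert is_replaced("B0", successors={"B0": ["B1"]}, pushed={"B1"})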
@@ -0,0 +1,105 @@
+====================================
+Testing head checking code: Case E-2
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category E: cases involving changesets on multiple branches
+TestCase 8: moving interleaved branches away from each other
+
+.. old-state:
+..
+.. * 2-changeset on branch default
+.. * 1-changeset on branch Z (between the other two)
+..
+.. new-state:
+..
+.. * 2-changeset on branch default, aligned
+.. * 1-changeset on branch Z (at the same location)
+..
+.. expected-result:
+..
+.. * push allowed
+..
+.. graph-summary:
+..
+.. C ø⇠◔ C'
+.. | |
+.. B ◔ |
+.. | |
+.. A ø⇠◔ A'
+.. |/
+.. ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir E1
+  $ cd E1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg branch Z
+  marked working directory as branch Z
+  (branches are permanent and global, did you want a bookmark?)
+  $ mkcommit B0
+  $ hg branch default --force
+  marked working directory as branch default
+  $ mkcommit C0
+  created new head
+  $ hg push --new-branch
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  $ hg up 0
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ mkcommit A1
+  created new head
+  $ mkcommit C1
+  $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"`
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  2 new orphan changesets
+  $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  $ hg log -G --hidden
+  @  0c76bc104656 (draft): C1
+  |
+  o  f6082bc4ffef (draft): A1
+  |
+  | x  afc55ba2ce61 (draft): C0
+  | |
+  | *  93e5c1321ece (draft): B0
+  | |
+  | x  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+
+
+Actual testing
+--------------
+
+  $ hg push -r 'desc("C1")'
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files (+1 heads)
+  2 new obsolescence markers
+  obsoleted 2 changesets
+  1 new orphan changesets
+
+  $ cd ../..
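
A note on the orphan warnings above: obsoleting A0 makes its descendants B0 and C0 orphans ("2 new orphan changesets"), and once C0 is rewritten too, only B0 is left orphaned, which is what the server reports during the push ("1 new orphan changesets"). Orphans can be listed with the orphan() revset; for example (output omitted):

    $ hg log -r 'orphan()'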
@@ -0,0 +1,94 @@
+====================================
+Testing head checking code: Case E-3
+====================================
+
+Mercurial checks for the introduction of new heads on push. Evolution comes
+into play to detect if existing branches on the server are being replaced by
+some of the new ones we push.
+
+This case is part of a series of tests checking this behavior.
+
+Category E: cases involving changesets on multiple branches
+TestCase 8: moving only part of the interleaved branch away, creating 2 heads
+
+.. old-state:
+..
+.. * 2-changeset on branch default
+.. * 1-changeset on branch Z (between the other two)
+..
+.. new-state:
+..
+.. * 2-changeset on branch default, one untouched, the other moved
+.. * 1-changeset on branch Z (at the same location)
+..
+.. expected-result:
+..
+.. * push rejected
+..
+.. graph-summary:
+..
+.. C ø⇠◔ C'
+.. | |
+.. B ◔ |
+.. | |
+.. A ◔ |
+.. |/
+.. ●
+
+  $ . $TESTDIR/testlib/push-checkheads-util.sh
+
+Test setup
+----------
+
+  $ mkdir E1
+  $ cd E1
+  $ setuprepos
+  creating basic server and client repo
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd client
+  $ hg branch Z
+  marked working directory as branch Z
+  (branches are permanent and global, did you want a bookmark?)
+  $ mkcommit B0
+  $ hg branch default --force
+  marked working directory as branch default
+  $ mkcommit C0
+  created new head
+  $ hg push --new-branch
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  $ hg up 0
+  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
+  $ mkcommit C1
+  created new head
+  $ hg debugobsolete `getid "desc(C0)"` `getid "desc(C1)"`
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  $ hg log -G --hidden
+  @  dc44c53142f0 (draft): C1
+  |
+  | x  afc55ba2ce61 (draft): C0
+  | |
+  | o  93e5c1321ece (draft): B0
+  | |
+  | o  8aaa48160adc (draft): A0
+  |/
+  o  1e4be0697311 (public): root
+
+
+Actual testing
+--------------
+
+  $ hg push -r 'desc("C1")'
+  pushing to $TESTTMP/E1/server
+  searching for changes
+  abort: push creates new remote head dc44c53142f0!
+  (merge or see 'hg help push' for details about pushing new heads)
+  [255]
+
+  $ cd ../..
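
The contrast with cases E-1 and E-2: here only C0 is obsoleted, while A0 and B0 survive on the server, so C1 does not replace the entire old branch and the push would leave the repository with an extra head. A simplified model of that accounting (illustrative only, not Mercurial's implementation):

    # A rewritten stack only replaces an old head when nothing exclusive to
    # that head survives the push un-rewritten.
    def creates_extra_head(old_branch_nodes, obsoleted_by_push):
        return bool(old_branch_nodes - obsoleted_by_push)

    # E-2: both old default-branch changesets are rewritten by the push.
    assert not creates_extra_head({"A0", "C0"}, {"A0", "C0"})
    # E-3: A0 survives, so pushing C1 adds a second head.
    assert creates_extra_head({"A0", "C0"}, {"C0"})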
@@ -1,593 +1,598 @@
 # discovery.py - protocol changeset discovery functions
 #
 # Copyright 2010 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import functools

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )

 from . import (
     bookmarks,
     branchmap,
     error,
     phases,
     pycompat,
     scmutil,
     setdiscovery,
     treediscovery,
     util,
 )


 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
     """Return a tuple (common, anyincoming, heads) used to identify the common
     subset of nodes between repo and remote.

     "common" is a list of (at least) the heads of the common subset.
     "anyincoming" is testable as a boolean indicating if any nodes are missing
     locally. If remote does not support getbundle, this actually is a list of
     roots of the nodes that would be incoming, to be supplied to
     changegroupsubset. No code except for pull should be relying on this fact
     any longer.
     "heads" is either the supplied heads, or else the remote's heads.
     "ancestorsof" if not None, restricts the discovery to a subset defined by
     these nodes. Changesets outside of this set won't be considered (and
     won't appear in "common")

     If you pass heads and they are all known locally, the response lists just
     these heads in "common" and in "heads".

     Please use findcommonoutgoing to compute the set of outgoing nodes to give
     extensions a good hook into outgoing.
     """

     if not remote.capable(b'getbundle'):
         return treediscovery.findcommonincoming(repo, remote, heads, force)

     if heads:
         knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
         if all(knownnode(h) for h in heads):
             return (heads, False, heads)

     res = setdiscovery.findcommonheads(
         repo.ui,
         repo,
         remote,
         abortwhenunrelated=not force,
         ancestorsof=ancestorsof,
     )
     common, anyinc, srvheads = res
     return (list(common), anyinc, heads or list(srvheads))


 class outgoing(object):
     '''Represents the set of nodes present in a local repo but not in a
     (possibly) remote one.

     Members:

     missing is a list of all nodes present in local but not in remote.
     common is a list of all nodes shared between the two repos.
     excluded is the list of missing changesets that shouldn't be sent remotely.
     missingheads is the list of heads of missing.
     commonheads is the list of heads of common.

     The sets are computed on demand from the heads, unless provided upfront
     by discovery.'''

     def __init__(
         self, repo, commonheads=None, missingheads=None, missingroots=None
     ):
         # at least one of them must not be set
         assert None in (commonheads, missingroots)
         cl = repo.changelog
         if missingheads is None:
             missingheads = cl.heads()
         if missingroots:
             discbases = []
             for n in missingroots:
                 discbases.extend([p for p in cl.parents(n) if p != nullid])
             # TODO remove call to nodesbetween.
             # TODO populate attributes on outgoing instance instead of setting
             # discbases.
             csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
             included = set(csets)
             missingheads = heads
             commonheads = [n for n in discbases if n not in included]
         elif not commonheads:
             commonheads = [nullid]
         self.commonheads = commonheads
         self.missingheads = missingheads
         self._revlog = cl
         self._common = None
         self._missing = None
         self.excluded = []

     def _computecommonmissing(self):
         sets = self._revlog.findcommonmissing(
             self.commonheads, self.missingheads
         )
         self._common, self._missing = sets

     @util.propertycache
     def common(self):
         if self._common is None:
             self._computecommonmissing()
         return self._common

     @util.propertycache
     def missing(self):
         if self._missing is None:
             self._computecommonmissing()
         return self._missing


 def findcommonoutgoing(
     repo, other, onlyheads=None, force=False, commoninc=None, portable=False
 ):
     '''Return an outgoing instance to identify the nodes present in repo but
     not in other.

     If onlyheads is given, only nodes ancestral to nodes in onlyheads
     (inclusive) are included. If you already know the local repo's heads,
     passing them in onlyheads is faster than letting them be recomputed here.

     If commoninc is given, it must be the result of a prior call to
     findcommonincoming(repo, other, force) to avoid recomputing it here.

     If portable is given, compute more conservative common and missingheads,
     to make bundles created from the instance more portable.'''
     # declare an empty outgoing object to be filled later
     og = outgoing(repo, None, None)

     # get common set if not provided
     if commoninc is None:
         commoninc = findcommonincoming(
             repo, other, force=force, ancestorsof=onlyheads
         )
     og.commonheads, _any, _hds = commoninc

     # compute outgoing
     mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
     if not mayexclude:
         og.missingheads = onlyheads or repo.heads()
     elif onlyheads is None:
         # use visible heads as it should be cached
         og.missingheads = repo.filtered(b"served").heads()
         og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
     else:
         # compute common, missing and exclude secret stuff
         sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
         og._common, allmissing = sets
         og._missing = missing = []
         og.excluded = excluded = []
         for node in allmissing:
             ctx = repo[node]
             if ctx.phase() >= phases.secret or ctx.extinct():
                 excluded.append(node)
             else:
                 missing.append(node)
         if len(missing) == len(allmissing):
             missingheads = onlyheads
         else:  # update missing heads
             missingheads = phases.newheads(repo, onlyheads, excluded)
         og.missingheads = missingheads
     if portable:
         # recompute common and missingheads as if -r<rev> had been given for
         # each head of missing, and --base <rev> for each head of the proper
         # ancestors of missing
         og._computecommonmissing()
         cl = repo.changelog
         missingrevs = {cl.rev(n) for n in og._missing}
         og._common = set(cl.ancestors(missingrevs)) - missingrevs
         commonheads = set(og.commonheads)
         og.missingheads = [h for h in og.missingheads if h not in commonheads]

     return og


 def _headssummary(pushop):
     """compute a summary of branch and heads status before and after push

     return {'branch': ([remoteheads], [newheads],
                        [unsyncedheads], [discardedheads])} mapping

     - branch: the branch name,
     - remoteheads: the list of remote heads known locally
                    None if the branch is new,
     - newheads: the new remote heads (known locally) with outgoing pushed,
     - unsyncedheads: the list of remote heads unknown locally,
     - discardedheads: the list of heads made obsolete by the push.
     """
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     cl = repo.changelog
     headssum = {}
     missingctx = set()
     # A. Create set of branches involved in the push.
     branches = set()
     for n in outgoing.missing:
         ctx = repo[n]
         missingctx.add(ctx)
         branches.add(ctx.branch())

     with remote.commandexecutor() as e:
         remotemap = e.callcommand(b'branchmap', {}).result()

     knownnode = cl.hasnode  # do not use nodemap until it is filtered
     # A. register remote heads of branches which are in outgoing set
     for branch, heads in pycompat.iteritems(remotemap):
         # don't add head info about branches which we don't have locally
         if branch not in branches:
             continue
         known = []
         unsynced = []
         for h in heads:
             if knownnode(h):
                 known.append(h)
             else:
                 unsynced.append(h)
         headssum[branch] = (known, list(known), unsynced)

     # B. add new branch data
     for branch in branches:
         if branch not in headssum:
             headssum[branch] = (None, [], [])

     # C. Update newmap with outgoing changes.
     # This will possibly add new heads and remove existing ones.
     newmap = branchmap.remotebranchcache(
         (branch, heads[1])
         for branch, heads in pycompat.iteritems(headssum)
         if heads[0] is not None
     )
     newmap.update(repo, (ctx.rev() for ctx in missingctx))
     for branch, newheads in pycompat.iteritems(newmap):
         headssum[branch][1][:] = newheads
     for branch, items in pycompat.iteritems(headssum):
         for l in items:
             if l is not None:
                 l.sort()
         headssum[branch] = items + ([],)

     # If there is no obsstore, no post processing is needed.
     if repo.obsstore:
         torev = repo.changelog.rev
         futureheads = {torev(h) for h in outgoing.missingheads}
         futureheads |= {torev(h) for h in outgoing.commonheads}
         allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
         for branch, heads in sorted(pycompat.iteritems(headssum)):
             remoteheads, newheads, unsyncedheads, placeholder = heads
             result = _postprocessobsolete(pushop, allfuturecommon, newheads)
             headssum[branch] = (
                 remoteheads,
                 sorted(result[0]),
                 unsyncedheads,
                 sorted(result[1]),
             )
     return headssum


 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
     """Compute branchmapsummary for repo without branchmap support"""

     # 1-4b. old servers: Check for new topological heads.
     # Construct {old,new}map with branch = None (topological branch).
     # (code based on update)
     knownnode = repo.changelog.hasnode  # no nodemap until it is filtered
     oldheads = sorted(h for h in remoteheads if knownnode(h))
     # all nodes in outgoing.missing are children of either:
     # - an element of oldheads
     # - another element of outgoing.missing
     # - nullrev
     # This explains why the new heads are very simple to compute.
     r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
     newheads = sorted(c.node() for c in r)
     # set some unsynced head to issue the "unsynced changes" warning
     if inc:
         unsynced = [None]
     else:
         unsynced = []
     return {None: (oldheads, newheads, unsynced, [])}


 def _nowarnheads(pushop):
     # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     localbookmarks = repo._bookmarks

     with remote.commandexecutor() as e:
         remotebookmarks = e.callcommand(
             b'listkeys', {b'namespace': b'bookmarks',}
         ).result()

     bookmarkedheads = set()

     # internal config: bookmarks.pushing
     newbookmarks = [
         localbookmarks.expandname(b)
         for b in pushop.ui.configlist(b'bookmarks', b'pushing')
     ]

     for bm in localbookmarks:
         rnode = remotebookmarks.get(bm)
         if rnode and rnode in repo:
             lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
             if bookmarks.validdest(repo, rctx, lctx):
                 bookmarkedheads.add(lctx.node())
         else:
             if bm in newbookmarks and bm not in remotebookmarks:
                 bookmarkedheads.add(localbookmarks[bm])

     return bookmarkedheads


 def checkheads(pushop):
     """Check that a push won't add any outgoing head

     Raise an Abort error and display a ui message as needed.
     """

     repo = pushop.repo.unfiltered()
     remote = pushop.remote
     outgoing = pushop.outgoing
     remoteheads = pushop.remoteheads
     newbranch = pushop.newbranch
     inc = bool(pushop.incoming)

     # Check for each named branch if we're creating new remote heads.
     # To be a remote head after push, node must be either:
     # - unknown locally
     # - a local outgoing head descended from update
     # - a remote head that's known locally and not
     #   ancestral to an outgoing head
     if remoteheads == [nullid]:
         # remote is empty, nothing to check.
         return

     if remote.capable(b'branchmap'):
         headssum = _headssummary(pushop)
     else:
         headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
     pushop.pushbranchmap = headssum
     newbranches = [
         branch
         for branch, heads in pycompat.iteritems(headssum)
         if heads[0] is None
     ]
     # 1. Check for new branches on the remote.
     if newbranches and not newbranch:  # new branch requires --new-branch
         branchnames = b', '.join(sorted(newbranches))
         # Calculate how many of the new branches are closed branches
         closedbranches = set()
         for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
             if isclosed:
                 closedbranches.add(tag)
         closedbranches = closedbranches & set(newbranches)
         if closedbranches:
             errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
                 branchnames,
                 len(closedbranches),
             )
         else:
             errmsg = _(b"push creates new remote branches: %s!") % branchnames
         hint = _(b"use 'hg push --new-branch' to create new remote branches")
         raise error.Abort(errmsg, hint=hint)

     # 2. Find heads that we need not warn about
     nowarnheads = _nowarnheads(pushop)

     # 3. Check for new heads.
     # If there are more heads after the push than before, a suitable
     # error message, depending on unsynced status, is displayed.
     errormsg = None
     for branch, heads in sorted(pycompat.iteritems(headssum)):
         remoteheads, newheads, unsyncedheads, discardedheads = heads
         # add unsynced data
         if remoteheads is None:
             oldhs = set()
         else:
             oldhs = set(remoteheads)
         oldhs.update(unsyncedheads)
         dhs = None  # delta heads, the new heads on branch
         newhs = set(newheads)
         newhs.update(unsyncedheads)
         if unsyncedheads:
             if None in unsyncedheads:
                 # old remote, no heads data
                 heads = None
             else:
                 heads = scmutil.nodesummaries(repo, unsyncedheads)
             if heads is None:
                 repo.ui.status(
                     _(b"remote has heads that are not known locally\n")
                 )
             elif branch is None:
                 repo.ui.status(
                     _(b"remote has heads that are not known locally: %s\n")
                     % heads
                 )
             else:
                 repo.ui.status(
                     _(
                         b"remote has heads on branch '%s' that are "
                         b"not known locally: %s\n"
                     )
                     % (branch, heads)
                 )
         if remoteheads is None:
             if len(newhs) > 1:
                 dhs = list(newhs)
                 if errormsg is None:
                     errormsg = (
                         _(b"push creates new branch '%s' with multiple heads")
                         % branch
                     )
                     hint = _(
                         b"merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
         elif len(newhs) > len(oldhs):
             # remove bookmarked or existing remote heads from the new heads list
             dhs = sorted(newhs - nowarnheads - oldhs)
         if dhs:
             if errormsg is None:
                 if branch not in (b'default', None):
                     errormsg = _(
                         b"push creates new remote head %s on branch '%s'!"
                     ) % (short(dhs[0]), branch,)
                 elif repo[dhs[0]].bookmarks():
                     errormsg = _(
                         b"push creates new remote head %s "
                         b"with bookmark '%s'!"
                     ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                 else:
                     errormsg = _(b"push creates new remote head %s!") % short(
                         dhs[0]
                     )
                 if unsyncedheads:
                     hint = _(
                         b"pull and merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
                 else:
                     hint = _(
                         b"merge or"
                         b" see 'hg help push' for details about"
                         b" pushing new heads"
                     )
             if branch is None:
                 repo.ui.note(_(b"new remote heads:\n"))
             else:
                 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
             for h in dhs:
                 repo.ui.note(b" %s\n" % short(h))
     if errormsg:
         raise error.Abort(errormsg, hint=hint)


 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
     """post process the list of new heads with obsolescence information

     Exists as a sub-function to contain the complexity and allow extensions to
     experiment with smarter logic.

     Returns a (newheads, discarded_heads) tuple
     """
     # known issues
     #
     # * We "silently" skip processing on all changesets unknown locally
     #
     # * if <nh> is public on the remote, it won't be affected by obsolete
     #   markers and a new head is created

     # define various utilities and containers
     repo = pushop.repo
     unfi = repo.unfiltered()
-    tonode = unfi.changelog.node
     torev = unfi.changelog.index.get_rev
     public = phases.public
     getphase = unfi._phasecache.phase
     ispublic = lambda r: getphase(unfi, r) == public
     ispushed = lambda n: torev(n) in futurecommon
     hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
     successorsmarkers = unfi.obsstore.successors
     newhs = set()  # final set of new heads
     discarded = set()  # new head of fully replaced branch

     localcandidate = set()  # candidate heads known locally
     unknownheads = set()  # candidate heads unknown locally
     for h in candidate_newhs:
         if h in unfi:
             localcandidate.add(h)
         else:
             if successorsmarkers.get(h) is not None:
                 msg = (
                     b'checkheads: remote head unknown locally has'
                     b' local marker: %s\n'
                 )
                 repo.ui.debug(msg % hex(h))
             unknownheads.add(h)

     # fast path the simple case
     if len(localcandidate) == 1:
         return unknownheads | set(candidate_newhs), set()

     # actually process branch replacement
     while localcandidate:
         nh = localcandidate.pop()
+        current_branch = unfi[nh].branch()
         # run this check early to skip the evaluation of the whole branch
         if torev(nh) in futurecommon or ispublic(torev(nh)):
             newhs.add(nh)
             continue

         # Get all revs/nodes on the branch exclusive to this head
         # (already filtered heads are "ignored")
         branchrevs = unfi.revs(
             b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
         )
-        branchnodes = [tonode(r) for r in branchrevs]
+
+        branchnodes = []
+        for r in branchrevs:
+            c = unfi[r]
+            if c.branch() == current_branch:
+                branchnodes.append(c.node())

         # The branch won't be hidden on the remote if
         # * any part of it is public,
         # * any part of it is considered part of the result by previous logic,
         # * if we have no markers to push to obsolete it.
         if (
             any(ispublic(r) for r in branchrevs)
             or any(torev(n) in futurecommon for n in branchnodes)
             or any(not hasoutmarker(n) for n in branchnodes)
         ):
             newhs.add(nh)
         else:
             # note: there is a corner case if there is a merge in the branch.
             # we might end up with -more- heads. However, these heads are not
             # "added" by the push, but more by the "removal" on the remote so I
             # think it is okay to ignore them,
             discarded.add(nh)
     newhs |= unknownheads
     return newhs, discarded


 def pushingmarkerfor(obsstore, ispushed, node):
     """true if some markers are to be pushed for node

     We cannot just look into the pushed obsmarkers from the pushop because
     discovery might have filtered relevant markers. In addition listing all
     markers relevant to all changesets in the pushed set would be too expensive
     (O(len(repo)))

     (note: there are caching opportunities in this function, but it would
     require a two-dimensional stack.)
     """
     successorsmarkers = obsstore.successors
     stack = [node]
     seen = set(stack)
     while stack:
         current = stack.pop()
         if ispushed(current):
             return True
         markers = successorsmarkers.get(current, ())
         # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
         for m in markers:
             nexts = m[1]  # successors
             if not nexts:  # this is a prune marker
                 nexts = m[5] or ()  # parents
             for n in nexts:
                 if n not in seen:
                     seen.add(n)
                     stack.append(n)
     return False
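
The functional change in this file is confined to _postprocessobsolete: the only(%n, ...) revset can return changesets from other named branches interleaved with the candidate head's branch, which is exactly the situation exercised by cases E-1 through E-3 above. The fix records the head's named branch up front and keeps only matching nodes in branchnodes before the marker checks run. Below is a standalone rendering of that filter; the helper name is hypothetical, while repo.revs(), ctx.branch(), and ctx.node() are real Mercurial APIs.

    # Hypothetical standalone version of the fixed loop body.
    def exclusive_branch_nodes(unfi, nh, localcandidate, newhs):
        current_branch = unfi[nh].branch()  # named branch of the candidate head
        branchrevs = unfi.revs(b'only(%n, (%ln+%ln))', nh, localcandidate, newhs)
        branchnodes = []
        for r in branchrevs:
            c = unfi[r]
            if c.branch() == current_branch:  # skip interleaved foreign branches
                branchnodes.append(c.node())
        return branchnodes

Without the filter, a surviving changeset from another branch (B0 in the tests) could be attributed to the candidate head's branch and perturb the replaced-head computation.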
@@ -1,3719 +1,3719 @@
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import difflib
50 import difflib
51 import distutils.version as version
51 import distutils.version as version
52 import errno
52 import errno
53 import json
53 import json
54 import multiprocessing
54 import multiprocessing
55 import os
55 import os
56 import random
56 import random
57 import re
57 import re
58 import shutil
58 import shutil
59 import signal
59 import signal
60 import socket
60 import socket
61 import subprocess
61 import subprocess
62 import sys
62 import sys
63 import sysconfig
63 import sysconfig
64 import tempfile
64 import tempfile
65 import threading
65 import threading
66 import time
66 import time
67 import unittest
67 import unittest
68 import uuid
68 import uuid
69 import xml.dom.minidom as minidom
69 import xml.dom.minidom as minidom
70
70
71 try:
71 try:
72 import Queue as queue
72 import Queue as queue
73 except ImportError:
73 except ImportError:
74 import queue
74 import queue
75
75
76 try:
76 try:
77 import shlex
77 import shlex
78
78
79 shellquote = shlex.quote
79 shellquote = shlex.quote
80 except (ImportError, AttributeError):
80 except (ImportError, AttributeError):
81 import pipes
81 import pipes
82
82
83 shellquote = pipes.quote
83 shellquote = pipes.quote
84
84
85 processlock = threading.Lock()
85 processlock = threading.Lock()
86
86
87 pygmentspresent = False
87 pygmentspresent = False
88 # ANSI color is unsupported prior to Windows 10
88 # ANSI color is unsupported prior to Windows 10
89 if os.name != 'nt':
89 if os.name != 'nt':
90 try: # is pygments installed
90 try: # is pygments installed
91 import pygments
91 import pygments
92 import pygments.lexers as lexers
92 import pygments.lexers as lexers
93 import pygments.lexer as lexer
93 import pygments.lexer as lexer
94 import pygments.formatters as formatters
94 import pygments.formatters as formatters
95 import pygments.token as token
95 import pygments.token as token
96 import pygments.style as style
96 import pygments.style as style
97
97
98 pygmentspresent = True
98 pygmentspresent = True
99 difflexer = lexers.DiffLexer()
99 difflexer = lexers.DiffLexer()
100 terminal256formatter = formatters.Terminal256Formatter()
100 terminal256formatter = formatters.Terminal256Formatter()
101 except ImportError:
101 except ImportError:
102 pass
102 pass
103
103
104 if pygmentspresent:
104 if pygmentspresent:
105
105
106 class TestRunnerStyle(style.Style):
106 class TestRunnerStyle(style.Style):
        default_style = ""
        skipped = token.string_to_tokentype("Token.Generic.Skipped")
        failed = token.string_to_tokentype("Token.Generic.Failed")
        skippedname = token.string_to_tokentype("Token.Generic.SName")
        failedname = token.string_to_tokentype("Token.Generic.FName")
        styles = {
            skipped: '#e5e5e5',
            skippedname: '#00ffff',
            failed: '#7f0000',
            failedname: '#ff0000',
        }

    class TestRunnerLexer(lexer.RegexLexer):
        testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
        tokens = {
            'root': [
                (r'^Skipped', token.Generic.Skipped, 'skipped'),
                (r'^Failed ', token.Generic.Failed, 'failed'),
                (r'^ERROR: ', token.Generic.Failed, 'failed'),
            ],
            'skipped': [
                (testpattern, token.Generic.SName),
                (r':.*', token.Generic.Skipped),
            ],
            'failed': [
                (testpattern, token.Generic.FName),
                (r'(:| ).*', token.Generic.Failed),
            ],
        }

    runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
    runnerlexer = TestRunnerLexer()

origenviron = os.environ.copy()

if sys.version_info > (3, 5, 0):
    PYTHON3 = True
    xrange = range  # we use xrange in one place, and we'd rather not use range

    def _sys2bytes(p):
        if p is None:
            return p
        return p.encode('utf-8')

    def _bytes2sys(p):
        if p is None:
            return p
        return p.decode('utf-8')

    osenvironb = getattr(os, 'environb', None)
    if osenvironb is None:
        # Windows lacks os.environb, for instance. A proxy over the real thing
        # instead of a copy allows the environment to be updated via bytes on
        # all platforms.
        class environbytes(object):
            def __init__(self, strenv):
                self.__len__ = strenv.__len__
                self.clear = strenv.clear
                self._strenv = strenv

            def __getitem__(self, k):
                v = self._strenv.__getitem__(_bytes2sys(k))
                return _sys2bytes(v)

            def __setitem__(self, k, v):
                self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))

            def __delitem__(self, k):
                self._strenv.__delitem__(_bytes2sys(k))

            def __contains__(self, k):
                return self._strenv.__contains__(_bytes2sys(k))

            def __iter__(self):
                return iter([_sys2bytes(k) for k in iter(self._strenv)])

            def get(self, k, default=None):
                v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
                return _sys2bytes(v)

            def pop(self, k, default=None):
                v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
                return _sys2bytes(v)

        osenvironb = environbytes(os.environ)

    getcwdb = getattr(os, 'getcwdb')
    if not getcwdb or os.name == 'nt':
        getcwdb = lambda: _sys2bytes(os.getcwd())

elif sys.version_info >= (3, 0, 0):
    print(
        '%s is only supported on Python 3.5+ and 2.7, not %s'
        % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
    )
    sys.exit(70)  # EX_SOFTWARE from `man 3 sysexits`
else:
    PYTHON3 = False

    # In python 2.x, path operations are generally done using
    # bytestrings by default, so we don't have to do any extra
    # fiddling there. We define the wrapper functions anyway just to
    # help keep code consistent between platforms.
    def _sys2bytes(p):
        return p

    _bytes2sys = _sys2bytes
    osenvironb = os.environ
    getcwdb = os.getcwd

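# Editor's sketch (not part of the harness): whichever branch above ran, the
# rest of this script can treat the environment as a bytes-keyed mapping on
# every supported platform and Python version, e.g.:
#
#     osenvironb[b'HGTEST_DEMO'] = b'1'    # hypothetical variable
#     assert osenvironb.get(b'HGTEST_DEMO') == b'1'
#     osenvironb.pop(b'HGTEST_DEMO')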
# For Windows support
wifexited = getattr(os, "WIFEXITED", lambda x: False)

# Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    else:
        return False

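# Editor's sketch: parseargs() below uses this probe to decide the address
# family, roughly:
#
#     useipv6 = checksocketfamily('AF_INET6')
#
# which is True only when a localhost AF_INET6 socket can be bound (or the
# port is merely busy, which still proves the family works).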
# useipv6 will be set by parseargs
useipv6 = None


def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    if useipv6:
        family = socket.AF_INET6
    else:
        family = socket.AF_INET
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(('localhost', port))
        s.close()
        return True
    except socket.error as exc:
        if exc.errno not in (
            errno.EADDRINUSE,
            errno.EADDRNOTAVAIL,
            errno.EPROTONOSUPPORT,
        ):
            raise
    return False


closefds = os.name == 'posix'


def Popen4(cmd, wd, timeout, env=None):
    processlock.acquire()
    p = subprocess.Popen(
        _bytes2sys(cmd),
        shell=True,
        bufsize=-1,
        cwd=_bytes2sys(wd),
        env=env,
        close_fds=closefds,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    processlock.release()

    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:

        def t():
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(0.1)
            p.timeout = True
            if p.returncode is None:
                terminate(p)

        threading.Thread(target=t).start()

    return p

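# Editor's sketch: callers launch a shell command rooted in a working
# directory and then wait on the returned handle themselves, e.g.:
#
#     proc = Popen4(b'echo hi', wd=b'.', timeout=30)
#     out, _ = proc.communicate()    # sets proc.returncode when done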
if sys.executable:
    sysexecutable = sys.executable
elif os.environ.get('PYTHONEXECUTABLE'):
    sysexecutable = os.environ['PYTHONEXECUTABLE']
elif os.environ.get('PYTHON'):
    sysexecutable = os.environ['PYTHON']
else:
    raise AssertionError('Could not find Python interpreter')

PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
IMPL_PATH = b'PYTHONPATH'
if 'java' in sys.platform:
    IMPL_PATH = b'JYTHONPATH'

defaults = {
    'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
    'timeout': ('HGTEST_TIMEOUT', 180),
    'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
    'port': ('HGTEST_PORT', 20059),
    'shell': ('HGTEST_SHELL', 'sh'),
}

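# Editor's note: getparser() below resolves each default from its environment
# variable before falling back to the literal value, so a hypothetical
#
#     HGTEST_TIMEOUT=600 ./run-tests.py test-foo.t
#
# yields options.timeout == 600 unless -t/--timeout overrides it.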
def canonpath(path):
    return os.path.realpath(os.path.expanduser(path))


def parselistfiles(files, listtype, warn=True):
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        for line in f.readlines():
            line = line.split(b'#', 1)[0].strip()
            if line:
                entries[line] = filename

        f.close()
    return entries

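# Editor's note: blacklist/whitelist files are plain text, one test name per
# line, with '#' starting a comment; e.g. a hypothetical blacklist file:
#
#     test-flaky.t        # crashes on this builder
#     test-huge.t
#
# parselistfiles() maps each listed name to the file that mentioned it.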
def parsettestcases(path):
    """read a .t test file, return a list of sorted test case name lists

    One inner list is returned per '#testcases' line. If path does not
    exist, return an empty list.
    """
    cases = []
    try:
        with open(path, 'rb') as f:
            for l in f:
                if l.startswith(b'#testcases '):
                    cases.append(sorted(l[11:].split()))
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return cases

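# Editor's note: a .t file containing the line '#testcases simple complex'
# yields [[b'complex', b'simple']] here; each inner list is one group of
# mutually exclusive case names declared by a single '#testcases' line.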
def getparser():
    """Obtain the argument parser used by the CLI."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    selection = parser.add_argument_group('Test Selection')
    selection.add_argument(
        '--allow-slow-tests',
        action='store_true',
        help='allow extremely slow tests',
    )
    selection.add_argument(
        "--blacklist",
        action="append",
        help="skip tests listed in the specified blacklist file",
    )
    selection.add_argument(
        "--changed",
        help="run tests that are changed in parent rev or working directory",
    )
    selection.add_argument(
        "-k", "--keywords", help="run tests matching keywords"
    )
    selection.add_argument(
        "-r", "--retest", action="store_true", help="retest failed tests"
    )
    selection.add_argument(
        "--test-list",
        action="append",
        help="read tests to run from the specified file",
    )
    selection.add_argument(
        "--whitelist",
        action="append",
        help="always run tests listed in the specified whitelist file",
    )
    selection.add_argument(
        'tests', metavar='TESTS', nargs='*', help='Tests to run'
    )

    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument(
        '--bisect-repo',
        metavar='bisect_repo',
        help="Path of a repo to bisect. Use together with --known-good-rev",
    )
    harness.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="debug mode: write output of test scripts to console"
        " rather than capturing and diffing it (disables timeout)",
    )
    harness.add_argument(
        "-f",
        "--first",
        action="store_true",
        help="exit on the first test failure",
    )
    harness.add_argument(
        "-i",
        "--interactive",
        action="store_true",
        help="prompt to accept changed output",
    )
    harness.add_argument(
        "-j",
        "--jobs",
        type=int,
        help="number of jobs to run in parallel"
        " (default: $%s or %d)" % defaults['jobs'],
    )
    harness.add_argument(
        "--keep-tmpdir",
        action="store_true",
        help="keep temporary directory after running tests",
    )
    harness.add_argument(
        '--known-good-rev',
        metavar="known_good_rev",
        help=(
            "Automatically bisect any failures using this "
            "revision as a known-good revision."
        ),
    )
    harness.add_argument(
        "--list-tests",
        action="store_true",
        help="list tests instead of running them",
    )
    harness.add_argument(
        "--loop", action="store_true", help="loop tests repeatedly"
    )
    harness.add_argument(
        '--random', action="store_true", help='run tests in random order'
    )
    harness.add_argument(
        '--order-by-runtime',
        action="store_true",
        help='run slowest tests first, according to .testtimes',
    )
    harness.add_argument(
        "-p",
        "--port",
        type=int,
        help="port on which servers should listen"
        " (default: $%s or %d)" % defaults['port'],
    )
    harness.add_argument(
        '--profile-runner',
        action='store_true',
        help='run statprof on run-tests',
    )
    harness.add_argument(
        "-R", "--restart", action="store_true", help="restart at last error"
    )
    harness.add_argument(
        "--runs-per-test",
        type=int,
        dest="runs_per_test",
        help="run each test N times (default=1)",
        default=1,
    )
    harness.add_argument(
        "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
    )
    harness.add_argument(
        '--showchannels', action='store_true', help='show scheduling channels'
    )
    harness.add_argument(
        "--slowtimeout",
        type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
        " (default: $%s or %d)" % defaults['slowtimeout'],
    )
    harness.add_argument(
        "-t",
        "--timeout",
        type=int,
        help="kill errant tests after TIMEOUT seconds"
        " (default: $%s or %d)" % defaults['timeout'],
    )
    harness.add_argument(
        "--tmpdir",
        help="run tests in the given temporary directory"
        " (implies --keep-tmpdir)",
    )
    harness.add_argument(
        "-v", "--verbose", action="store_true", help="output verbose messages"
    )

    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument(
        "--chg",
        action="store_true",
        help="install and use chg wrapper in place of hg",
    )
    hgconf.add_argument("--compiler", help="compiler to build with")
    hgconf.add_argument(
        '--extra-config-opt',
        action="append",
        default=[],
        help='set the given config opt in the test hgrc',
    )
    hgconf.add_argument(
        "-l",
        "--local",
        action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
        "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
    )
    hgconf.add_argument(
        "--ipv6",
        action="store_true",
        help="prefer IPv6 to IPv4 for network related tests",
    )
    hgconf.add_argument(
        "--pure",
        action="store_true",
        help="use pure Python code instead of C extensions",
    )
    hgconf.add_argument(
        "--with-chg",
        metavar="CHG",
        help="use specified chg wrapper in place of hg",
    )
    hgconf.add_argument(
        "--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
        "temporary installation",
    )

    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument(
        "-C",
        "--annotate",
        action="store_true",
        help="output files annotated with coverage",
    )
    reporting.add_argument(
        "--color",
        choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)",
    )
    reporting.add_argument(
        "-c",
        "--cover",
        action="store_true",
        help="print a test coverage report",
    )
    reporting.add_argument(
        '--exceptions',
        action='store_true',
        help='log all exceptions and generate an exception report',
    )
    reporting.add_argument(
        "-H",
        "--htmlcov",
        action="store_true",
        help="create an HTML report of the coverage of the files",
    )
    reporting.add_argument(
        "--json",
        action="store_true",
        help="store test result data in 'report.json' file",
    )
    reporting.add_argument(
        "--outputdir",
        help="directory to write error logs to (default=test directory)",
    )
    reporting.add_argument(
        "-n", "--nodiff", action="store_true", help="skip showing test changes"
    )
    reporting.add_argument(
        "-S",
        "--noskips",
        action="store_true",
        help="don't report skipped tests verbosely",
    )
    reporting.add_argument(
        "--time", action="store_true", help="time how long each test takes"
    )
    reporting.add_argument("--view", help="external diff viewer")
    reporting.add_argument(
        "--xunit", help="record xunit results at specified path"
    )

    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser

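# Editor's sketch of the two entry points together (hypothetical argv):
#
#     parser = getparser()
#     options = parseargs(['--jobs', '2', 'test-example.t'], parser)
#     assert options.jobs == 2 and options.tests == ['test-example.t']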
def parseargs(args, parser):
    """Parse arguments with our argument parser and validate results."""
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if options.local:
        if options.with_hg or options.with_chg:
            parser.error('--local cannot be used with --with-hg or --with-chg')
        testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error(
                    '--local specified, but %r not found or '
                    'not executable' % binpath
                )
            setattr(options, attr, _bytes2sys(binpath))

    if options.with_hg:
        options.with_hg = canonpath(_sys2bytes(options.with_hg))
        if not (
            os.path.isfile(options.with_hg)
            and os.access(options.with_hg, os.X_OK)
        ):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
            sys.stderr.flush()

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False  # no installation to temporary location
        options.with_chg = canonpath(_sys2bytes(options.with_chg))
        if not (
            os.path.isfile(options.with_chg)
            and os.access(options.with_chg, os.X_OK)
        ):
            parser.error('--with-chg must specify a chg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error(
            '--chg does not work when --with-hg is specified '
            '(use --with-chg instead)'
        )

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write(
            'warning: --color=always ignored because '
            'pygments is not installed\n'
        )

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
            'AF_INET6'
        )

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage

            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error(
            "sorry, coverage options do not work when --local is specified"
        )

    if options.anycoverage and options.with_hg:
        parser.error(
            "sorry, coverage options do not work when --with-hg is specified"
        )

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write('warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n'
            )
        options.timeout = 0
        options.slowtimeout = 0

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options


def rename(src, dst):
    """Like os.rename(), but trades atomicity and friendliness to open files
    for support of an existing destination.
    """
    shutil.copy(src, dst)
    os.remove(src)


def makecleanable(path):
    """Try to fix directory permissions recursively so that the entire tree
    can be deleted"""
    for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
        for d in dirnames:
            p = os.path.join(dirpath, d)
            try:
                os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700)  # chmod u+rwx
            except OSError:
                pass


_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools

    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)


def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'
        ):
            servefail = True

    return servefail, lines

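# Editor's note on the header fix-up in getdiff(): some Python versions emit
# '--- <file> \n' (with a trailing space) when no timestamp is supplied, and
# Windows paths contain backslashes; trimming the space and normalizing the
# separators keeps recorded .err diffs stable across platforms.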
verbose = False


def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)


# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(br'(.*) \((.+?) !\)\n$')

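# Editor's note: for example
#
#     optline.match(b'waiting for lock (windows !)\n')
#
# captures b'waiting for lock' in group 1 and b'windows' in group 2.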

def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')

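# Editor's note: e.g. cdatasafe(b'ok\x00]]>') == b'ok?] ]>' -- the NUL byte is
# masked and the CDATA terminator is split so it can no longer close the
# enclosing block.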
836 def log(*msg):
836 def log(*msg):
837 """Log something to stdout.
837 """Log something to stdout.
838
838
839 Arguments are strings to print.
839 Arguments are strings to print.
840 """
840 """
841 with iolock:
841 with iolock:
842 if verbose:
842 if verbose:
843 print(verbose, end=' ')
843 print(verbose, end=' ')
844 for m in msg:
844 for m in msg:
845 print(m, end=' ')
845 print(m, end=' ')
846 print()
846 print()
847 sys.stdout.flush()
847 sys.stdout.flush()
848
848
849
849
850 def highlightdiff(line, color):
850 def highlightdiff(line, color):
851 if not color:
851 if not color:
852 return line
852 return line
853 assert pygmentspresent
853 assert pygmentspresent
854 return pygments.highlight(
854 return pygments.highlight(
855 line.decode('latin1'), difflexer, terminal256formatter
855 line.decode('latin1'), difflexer, terminal256formatter
856 ).encode('latin1')
856 ).encode('latin1')
857
857
858
858
859 def highlightmsg(msg, color):
859 def highlightmsg(msg, color):
860 if not color:
860 if not color:
861 return msg
861 return msg
862 assert pygmentspresent
862 assert pygmentspresent
863 return pygments.highlight(msg, runnerlexer, runnerformatter)
863 return pygments.highlight(msg, runnerlexer, runnerformatter)
864
864
865
865
866 def terminate(proc):
866 def terminate(proc):
867 """Terminate subprocess"""
867 """Terminate subprocess"""
868 vlog('# Terminating process %d' % proc.pid)
868 vlog('# Terminating process %d' % proc.pid)
869 try:
869 try:
870 proc.terminate()
870 proc.terminate()
871 except OSError:
871 except OSError:
872 pass
872 pass
873
873
874
874
875 def killdaemons(pidfile):
875 def killdaemons(pidfile):
876 import killdaemons as killmod
876 import killdaemons as killmod
877
877
878 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
878 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
879
879
880
880
881 class Test(unittest.TestCase):
881 class Test(unittest.TestCase):
882 """Encapsulates a single, runnable test.
882 """Encapsulates a single, runnable test.
883
883
884 While this class conforms to the unittest.TestCase API, it differs in that
884 While this class conforms to the unittest.TestCase API, it differs in that
885 instances need to be instantiated manually. (Typically, unittest.TestCase
885 instances need to be instantiated manually. (Typically, unittest.TestCase
886 classes are instantiated automatically by scanning modules.)
886 classes are instantiated automatically by scanning modules.)
887 """
887 """
888
888
889 # Status code reserved for skipped tests (used by hghave).
889 # Status code reserved for skipped tests (used by hghave).
890 SKIPPED_STATUS = 80
890 SKIPPED_STATUS = 80
891
891
892 def __init__(
892 def __init__(
893 self,
893 self,
894 path,
894 path,
895 outputdir,
895 outputdir,
896 tmpdir,
896 tmpdir,
897 keeptmpdir=False,
897 keeptmpdir=False,
898 debug=False,
898 debug=False,
899 first=False,
899 first=False,
900 timeout=None,
900 timeout=None,
901 startport=None,
901 startport=None,
902 extraconfigopts=None,
902 extraconfigopts=None,
903 shell=None,
903 shell=None,
904 hgcommand=None,
904 hgcommand=None,
905 slowtimeout=None,
905 slowtimeout=None,
906 usechg=False,
906 usechg=False,
907 useipv6=False,
907 useipv6=False,
908 ):
908 ):
909 """Create a test from parameters.
909 """Create a test from parameters.
910
910
911 path is the full path to the file defining the test.
911 path is the full path to the file defining the test.
912
912
913 tmpdir is the main temporary directory to use for this test.
913 tmpdir is the main temporary directory to use for this test.
914
914
915 keeptmpdir determines whether to keep the test's temporary directory
915 keeptmpdir determines whether to keep the test's temporary directory
916 after execution. It defaults to removal (False).
916 after execution. It defaults to removal (False).
917
917
918 debug mode will make the test execute verbosely, with unfiltered
918 debug mode will make the test execute verbosely, with unfiltered
919 output.
919 output.
920
920
921 timeout controls the maximum run time of the test. It is ignored when
921 timeout controls the maximum run time of the test. It is ignored when
922 debug is True. See slowtimeout for tests with #require slow.
922 debug is True. See slowtimeout for tests with #require slow.
923
923
924 slowtimeout overrides timeout if the test has #require slow.
924 slowtimeout overrides timeout if the test has #require slow.
925
925
926 startport controls the starting port number to use for this test. Each
926 startport controls the starting port number to use for this test. Each
927 test will reserve 3 port numbers for execution. It is the caller's
927 test will reserve 3 port numbers for execution. It is the caller's
928 responsibility to allocate a non-overlapping port range to Test
928 responsibility to allocate a non-overlapping port range to Test
929 instances.
929 instances.
930
930
931 extraconfigopts is an iterable of extra hgrc config options. Values
931 extraconfigopts is an iterable of extra hgrc config options. Values
932 must have the form "key=value" (something understood by hgrc). Values
932 must have the form "key=value" (something understood by hgrc). Values
933 of the form "foo.key=value" will result in "[foo] key=value".
933 of the form "foo.key=value" will result in "[foo] key=value".
934
934
935 shell is the shell to execute tests in.
935 shell is the shell to execute tests in.
936 """
936 """
937 if timeout is None:
937 if timeout is None:
938 timeout = defaults['timeout']
938 timeout = defaults['timeout']
939 if startport is None:
939 if startport is None:
940 startport = defaults['port']
940 startport = defaults['port']
941 if slowtimeout is None:
941 if slowtimeout is None:
942 slowtimeout = defaults['slowtimeout']
942 slowtimeout = defaults['slowtimeout']
943 self.path = path
943 self.path = path
944 self.bname = os.path.basename(path)
944 self.bname = os.path.basename(path)
945 self.name = _bytes2sys(self.bname)
945 self.name = _bytes2sys(self.bname)
946 self._testdir = os.path.dirname(path)
946 self._testdir = os.path.dirname(path)
947 self._outputdir = outputdir
947 self._outputdir = outputdir
948 self._tmpname = os.path.basename(path)
948 self._tmpname = os.path.basename(path)
949 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
949 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
950
950
951 self._threadtmp = tmpdir
951 self._threadtmp = tmpdir
952 self._keeptmpdir = keeptmpdir
952 self._keeptmpdir = keeptmpdir
953 self._debug = debug
953 self._debug = debug
954 self._first = first
954 self._first = first
955 self._timeout = timeout
955 self._timeout = timeout
956 self._slowtimeout = slowtimeout
956 self._slowtimeout = slowtimeout
957 self._startport = startport
957 self._startport = startport
958 self._extraconfigopts = extraconfigopts or []
958 self._extraconfigopts = extraconfigopts or []
959 self._shell = _sys2bytes(shell)
959 self._shell = _sys2bytes(shell)
960 self._hgcommand = hgcommand or b'hg'
960 self._hgcommand = hgcommand or b'hg'
961 self._usechg = usechg
961 self._usechg = usechg
962 self._useipv6 = useipv6
962 self._useipv6 = useipv6
963
963
964 self._aborted = False
964 self._aborted = False
965 self._daemonpids = []
965 self._daemonpids = []
966 self._finished = None
966 self._finished = None
967 self._ret = None
967 self._ret = None
968 self._out = None
968 self._out = None
969 self._skipped = None
969 self._skipped = None
970 self._testtmp = None
970 self._testtmp = None
971 self._chgsockdir = None
971 self._chgsockdir = None
972
972
973 self._refout = self.readrefout()
973 self._refout = self.readrefout()
974
974
975 def readrefout(self):
975 def readrefout(self):
976 """read reference output"""
976 """read reference output"""
977 # If we're not in --debug mode and reference output file exists,
977 # If we're not in --debug mode and reference output file exists,
978 # check test output against it.
978 # check test output against it.
979 if self._debug:
979 if self._debug:
980 return None # to match "out is None"
980 return None # to match "out is None"
981 elif os.path.exists(self.refpath):
981 elif os.path.exists(self.refpath):
982 with open(self.refpath, 'rb') as f:
982 with open(self.refpath, 'rb') as f:
983 return f.read().splitlines(True)
983 return f.read().splitlines(True)
984 else:
984 else:
985 return []
985 return []
986
986
987 # needed to get base class __repr__ running
987 # needed to get base class __repr__ running
988 @property
988 @property
989 def _testMethodName(self):
989 def _testMethodName(self):
990 return self.name
990 return self.name
991
991
992 def __str__(self):
992 def __str__(self):
993 return self.name
993 return self.name
994
994
995 def shortDescription(self):
995 def shortDescription(self):
996 return self.name
996 return self.name
997
997
998 def setUp(self):
998 def setUp(self):
999 """Tasks to perform before run()."""
999 """Tasks to perform before run()."""
1000 self._finished = False
1000 self._finished = False
1001 self._ret = None
1001 self._ret = None
1002 self._out = None
1002 self._out = None
1003 self._skipped = None
1003 self._skipped = None
1004
1004
1005 try:
1005 try:
1006 os.mkdir(self._threadtmp)
1006 os.mkdir(self._threadtmp)
1007 except OSError as e:
1007 except OSError as e:
1008 if e.errno != errno.EEXIST:
1008 if e.errno != errno.EEXIST:
1009 raise
1009 raise
1010
1010
1011 name = self._tmpname
1011 name = self._tmpname
1012 self._testtmp = os.path.join(self._threadtmp, name)
1012 self._testtmp = os.path.join(self._threadtmp, name)
1013 os.mkdir(self._testtmp)
1013 os.mkdir(self._testtmp)
1014
1014
1015 # Remove any previous output files.
1015 # Remove any previous output files.
1016 if os.path.exists(self.errpath):
1016 if os.path.exists(self.errpath):
1017 try:
1017 try:
1018 os.remove(self.errpath)
1018 os.remove(self.errpath)
1019 except OSError as e:
1019 except OSError as e:
1020 # We might have raced another test to clean up a .err
1020 # We might have raced another test to clean up a .err
1021 # file, so ignore ENOENT when removing a previous .err
1021 # file, so ignore ENOENT when removing a previous .err
1022 # file.
1022 # file.
1023 if e.errno != errno.ENOENT:
1023 if e.errno != errno.ENOENT:
1024 raise
1024 raise
1025
1025
1026 if self._usechg:
1026 if self._usechg:
1027 self._chgsockdir = os.path.join(
1027 self._chgsockdir = os.path.join(
1028 self._threadtmp, b'%s.chgsock' % name
1028 self._threadtmp, b'%s.chgsock' % name
1029 )
1029 )
1030 os.mkdir(self._chgsockdir)
1030 os.mkdir(self._chgsockdir)
1031
1031
1032 def run(self, result):
1032 def run(self, result):
1033 """Run this test and report results against a TestResult instance."""
1033 """Run this test and report results against a TestResult instance."""
1034 # This function is extremely similar to unittest.TestCase.run(). Once
1034 # This function is extremely similar to unittest.TestCase.run(). Once
1035 # we require Python 2.7 (or at least its version of unittest), this
1035 # we require Python 2.7 (or at least its version of unittest), this
1036 # function can largely go away.
1036 # function can largely go away.
1037 self._result = result
1037 self._result = result
1038 result.startTest(self)
1038 result.startTest(self)
1039 try:
1039 try:
1040 try:
1040 try:
1041 self.setUp()
1041 self.setUp()
1042 except (KeyboardInterrupt, SystemExit):
1042 except (KeyboardInterrupt, SystemExit):
1043 self._aborted = True
1043 self._aborted = True
1044 raise
1044 raise
1045 except Exception:
1045 except Exception:
1046 result.addError(self, sys.exc_info())
1046 result.addError(self, sys.exc_info())
1047 return
1047 return
1048
1048
1049 success = False
1049 success = False
1050 try:
1050 try:
1051 self.runTest()
1051 self.runTest()
1052 except KeyboardInterrupt:
1052 except KeyboardInterrupt:
1053 self._aborted = True
1053 self._aborted = True
1054 raise
1054 raise
1055 except unittest.SkipTest as e:
1055 except unittest.SkipTest as e:
1056 result.addSkip(self, str(e))
1056 result.addSkip(self, str(e))
1057 # The base class will have already counted this as a
1057 # The base class will have already counted this as a
1058 # test we "ran", but we want to exclude skipped tests
1058 # test we "ran", but we want to exclude skipped tests
1059 # from those we count towards those run.
1059 # from those we count towards those run.
1060 result.testsRun -= 1
1060 result.testsRun -= 1
1061 except self.failureException as e:
1061 except self.failureException as e:
1062 # This differs from unittest in that we don't capture
1062 # This differs from unittest in that we don't capture
1063 # the stack trace. This is for historical reasons and
1063 # the stack trace. This is for historical reasons and
1064 # this decision could be revisited in the future,
1064 # this decision could be revisited in the future,
1065 # especially for PythonTest instances.
1065 # especially for PythonTest instances.
1066 if result.addFailure(self, str(e)):
1066 if result.addFailure(self, str(e)):
1067 success = True
1067 success = True
1068 except Exception:
1068 except Exception:
1069 result.addError(self, sys.exc_info())
1069 result.addError(self, sys.exc_info())
1070 else:
1070 else:
1071 success = True
1071 success = True
1072
1072
1073 try:
1073 try:
1074 self.tearDown()
1074 self.tearDown()
1075 except (KeyboardInterrupt, SystemExit):
1075 except (KeyboardInterrupt, SystemExit):
1076 self._aborted = True
1076 self._aborted = True
1077 raise
1077 raise
1078 except Exception:
1078 except Exception:
1079 result.addError(self, sys.exc_info())
1079 result.addError(self, sys.exc_info())
1080 success = False
1080 success = False
1081
1081
1082 if success:
1082 if success:
1083 result.addSuccess(self)
1083 result.addSuccess(self)
1084 finally:
1084 finally:
1085 result.stopTest(self, interrupted=self._aborted)
1085 result.stopTest(self, interrupted=self._aborted)
1086
1086
1087 def runTest(self):
1087 def runTest(self):
1088 """Run this test instance.
1088 """Run this test instance.
1089
1089
1090 This will return a tuple describing the result of the test.
1090 This will return a tuple describing the result of the test.
1091 """
1091 """
1092 env = self._getenv()
1092 env = self._getenv()
1093 self._genrestoreenv(env)
1093 self._genrestoreenv(env)
1094 self._daemonpids.append(env['DAEMON_PIDS'])
1094 self._daemonpids.append(env['DAEMON_PIDS'])
1095 self._createhgrc(env['HGRCPATH'])
1095 self._createhgrc(env['HGRCPATH'])
1096
1096
1097 vlog('# Test', self.name)
1097 vlog('# Test', self.name)
1098
1098
1099 ret, out = self._run(env)
1099 ret, out = self._run(env)
1100 self._finished = True
1100 self._finished = True
1101 self._ret = ret
1101 self._ret = ret
1102 self._out = out
1102 self._out = out
1103
1103
1104 def describe(ret):
1104 def describe(ret):
1105 if ret < 0:
1105 if ret < 0:
1106 return 'killed by signal: %d' % -ret
1106 return 'killed by signal: %d' % -ret
1107 return 'returned error code %d' % ret
1107 return 'returned error code %d' % ret
1108
1108
1109 self._skipped = False
1109 self._skipped = False
1110
1110
1111 if ret == self.SKIPPED_STATUS:
1111 if ret == self.SKIPPED_STATUS:
1112 if out is None: # Debug mode, nothing to parse.
1112 if out is None: # Debug mode, nothing to parse.
1113 missing = ['unknown']
1113 missing = ['unknown']
1114 failed = None
1114 failed = None
1115 else:
1115 else:
1116 missing, failed = TTest.parsehghaveoutput(out)
1116 missing, failed = TTest.parsehghaveoutput(out)
1117
1117
            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (
                (ret != 0 or out != self._refout)
                and not self._skipped
                and not self._debug
            ):
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log(
                '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
                % (_bytes2sys(self._testtmp), _bytes2sys(self._threadtmp),)
            )
        else:
            try:
                shutil.rmtree(self._testtmp)
            except OSError:
                # unreadable directory may be left in $TESTTMP; fix permission
                # and try again
                makecleanable(self._testtmp)
                shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        if (
            (self._ret != 0 or self._out != self._refout)
            and not self._skipped
            and not self._debug
            and self._out
        ):
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

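    # Example of the replacement below: with a start port of 20059
    # (illustrative value), _portmap(1) yields (br':20060\b', b':$HGPORT1'),
    # so a raw ":20060" in test output is normalized to the stable
    # ":$HGPORT1" placeholder.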
    def _portmap(self, i):
        offset = b'' if i == 0 else b'%d' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step helps with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformed substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
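            # common-pattern.py, loaded above, is expected to define a
            # module-level 'substitutions' list of (regex, replacement)
            # byte-string pairs, e.g. (illustrative only):
            #
            #     substitutions = [
            #         (br'pid \d+', br'pid *'),
            #     ]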
        return r

    def _escapepath(self, p):
        if os.name == 'nt':
            return b''.join(
                c.isalpha()
                and b'[%s%s]' % (c.lower(), c.upper())
                or c in b'/\\'
                and br'[/\\]'
                or c.isdigit()
                and c
                or b'\\' + c
                for c in [p[i : i + 1] for i in range(len(p))]
            )
        else:
            return re.escape(p)

    def _localip(self):
        if self._useipv6:
            return b'::1'
        else:
            return b'127.0.0.1'

    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))
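        # The resulting restoreenv.sh is plain POSIX shell, e.g.
        # (illustrative):
        #
        #     LANG='en_US.UTF-8'
        #     unset HGTESTEXTRAEXTENSIONS
        #
        # so a test can source "$HGTEST_RESTOREENV" to recover the
        # original environment.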

    def _getenv(self):
        """Obtain environment variables to use during test execution."""

        def defineport(i):
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)

        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = _bytes2sys(self._testtmp)
        env['TESTNAME'] = self.name
        env['HOME'] = _bytes2sys(self._testtmp)
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
        env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
        env["DAEMON_PIDS"] = _bytes2sys(
            os.path.join(self._threadtmp, b'daemon.pids')
        )
        env["HGEDITOR"] = (
            '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
        )
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env["HGHOSTNAME"] = "test-hostname"
        env['HGIPV6'] = str(int(self._useipv6))
        # See contrib/catapipe.py for how to use this functionality.
        if 'HGTESTCATAPULTSERVERPIPE' not in env:
            # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
            # non-test one in as a default, otherwise set to devnull
            env['HGTESTCATAPULTSERVERPIPE'] = env.get(
                'HGCATAPULTSERVERPIPE', os.devnull
            )

        extraextensions = []
        for opt in self._extraconfigopts:
            section, key = _sys2bytes(opt).split(b'.', 1)
            if section != b'extensions':
                continue
            name = key.split(b'=', 1)[0]
            extraextensions.append(name)

        if extraextensions:
            env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
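        # For example, running with --extra-config-opt=extensions.evolve=
        # (illustrative) adds b'evolve' here, letting test machinery detect
        # extra extensions through $HGTESTEXTRAEXTENSIONS.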

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = _bytes2sys(self._localip())

        # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
        # but this is needed for testing python instances like dummyssh,
        # dummysmtpd.py, and dumbhttp.py.
        if PYTHON3 and os.name == 'nt':
            env['PYTHONLEGACYWINDOWSSTDIO'] = '1'

        # Modified HOME in test environment can confuse Rust tools. So set
        # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
        # present and these variables aren't already defined.
        cargo_home_path = os.path.expanduser('~/.cargo')
        rustup_home_path = os.path.expanduser('~/.rustup')

        if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
            env['CARGO_HOME'] = cargo_home_path
        if (
            os.path.exists(rustup_home_path)
            and b'RUSTUP_HOME' not in osenvironb
        ):
            env['RUSTUP_HOME'] = rustup_home_path

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        dropped = [
            'CDPATH',
            'CHGDEBUG',
            'EDITOR',
            'GREP_OPTIONS',
            'HG',
            'HGMERGE',
            'HGPLAIN',
            'HGPLAINEXCEPT',
            'HGPROF',
            'http_proxy',
            'no_proxy',
            'NO_PROXY',
            'PAGER',
            'VISUAL',
        ]

        for k in dropped:
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in list(env):
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'merge = internal:merge\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/largefiles'))
            )
            hgrc.write(b'[lfs]\n')
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/lfs'))
            )
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %r\n' % self._useipv6)
            hgrc.write(b'server-header = testing stub value\n')
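            # Taken together, the writes above yield an hgrc beginning
            # (illustrative):
            #
            #     [ui]
            #     slash = True
            #     interactive = False
            #     ...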

            for opt in self._extraconfigopts:
                section, key = _sys2bytes(opt).split(b'.', 1)
                assert b'=' in key, (
                    'extra config opt %s must have an = for assignment' % opt
                )
                hgrc.write(b'[%s]\n%s\n' % (section, key))

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            proc = subprocess.Popen(
                _bytes2sys(cmd),
                shell=True,
                cwd=_bytes2sys(self._testtmp),
                env=env,
            )
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)

        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
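        # On POSIX, wait() yields a raw status word with the exit code in
        # the high byte and the terminating signal in the low byte (which
        # is why cleanup() above fakes signal.SIGTERM << 8 for a process it
        # had to terminate); wifexited()/WEXITSTATUS unpack it below.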
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)


class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Quote the python(3) executable for Windows
        cmd = b'"%s" "%s"' % (PYTHON, self.path)
        vlog("# Running", cmd.decode("utf-8"))
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result


# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])

WARN_UNDEFINED = 1
WARN_YES = 2
WARN_NO = 3

MARK_OPTIONAL = b" (?)\n"


def isoptional(line):
    return line.endswith(MARK_OPTIONAL)

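# For example, isoptional(b'obsoleted 1 changesets (?)\n') is True: a
# trailing " (?)" marks an expected-output line that may be missing from
# the actual output without failing the test.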

class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath
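            # With e.g. case == [b'latin-1'], a file test-foo.t (name
            # illustrative) is reported as "test-foo.t#latin-1" and writes
            # test-foo.t#latin-1.err on failure.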
        self._have = {}

    @property
    def refpath(self):
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd.decode("utf-8"))

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        allreqs = b' '.join(reqs)

        self._detectslow(reqs)

        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_sys2bytes(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(
            b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
            self._testtmp,
            0,
            self._getenv(),
        )
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
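        # Exit-status convention for the hghave helper: status 2 means the
        # check itself could not run (e.g. an unknown feature name), which
        # aborts the runner below; any other nonzero status just means the
        # feature is unavailable, and the answer is cached in self._have.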
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        self._have[allreqs] = (True, None)
        return True, None

    def _detectslow(self, reqs):
        """update the timeout of slow tests when appropriate"""
        if b'slow' in reqs:
            self._timeout = self._slowtimeout

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] in self._case:
                    return False
            elif arg in self._allcases:
                if arg not in self._case:
                    return False
            else:
                reqs.append(arg)
        self._detectslow(reqs)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()

        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))
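        # Each marker echoed by the generated script looks like
        # "SALT1581234567 42 0" (salt, input line number, previous exit
        # status); _processoutput() later splits on the salt to realign
        # script output with the .t source.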

        activetrace = []
        session = str(uuid.uuid4())
        if PYTHON3:
            session = session.encode('ascii')
        hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
            'HGCATAPULTSERVERPIPE'
        )

        def toggletrace(cmd=None):
            if not hgcatapult or hgcatapult == os.devnull:
                return

            if activetrace:
                script.append(
                    b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                    % (session, activetrace[0])
                )
            if cmd is None:
                return

            if isinstance(cmd, str):
                quoted = shellquote(cmd.strip())
            else:
                quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
            quoted = quoted.replace(b'\\', b'\\\\')
            script.append(
                b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                % (session, quoted)
            )
            activetrace[0:] = [quoted]

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        if hgcatapult and hgcatapult != os.devnull:
            if PYTHON3:
                hgcatapult = hgcatapult.encode('utf8')
                cataname = self.name.encode('utf8')
            else:
                cataname = self.name

            # Kludge: use a while loop to keep the pipe from getting
            # closed by our echo commands. The still-running file gets
            # reaped at the end of the script, which causes the while
            # loop to exit and closes the pipe. Sigh.
            script.append(
                b'rtendtracing() {\n'
                b'  echo END %(session)s %(name)s >> %(catapult)s\n'
                b'  rm -f "$TESTTMP/.still-running"\n'
                b'}\n'
                b'trap "rtendtracing" 0\n'
                b'touch "$TESTTMP/.still-running"\n'
                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
                b'> %(catapult)s &\n'
                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
                b'echo START %(session)s %(name)s >> %(catapult)s\n'
                % {
                    b'name': cataname,
                    b'session': session,
                    b'catapult': hgcatapult,
                }
            )

        if self._case:
            casestr = b'#'.join(self._case)
            if isinstance(casestr, str):
                quoted = shellquote(casestr)
            else:
                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append(
                        b'  !!! invalid #require\n'
                    )
                if not skipping:
                    haveresult, message = self._hghave(lsplit[1:])
                    if not haveresult:
                        script = [b'echo "%s"\nexit 80\n' % message]
                        break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(b'  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(b'  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '):  # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False)  # Make sure we report the exit code.
                    script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '):  # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '):  # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                rawcmd = l[4:]
                cmd = rawcmd.split()
                toggletrace(rawcmd)
                if len(cmd) == 2 and cmd[0] == b'cd':
                    rawcmd = b'cd %s || exit 1\n' % cmd[1]
                script.append(rawcmd)
            elif l.startswith(b'  > '):  # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '):  # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(b'  !!! missing #endif\n')
        addsalt(n + 1, False)
        # Need to end any current per-command trace
        if activetrace:
            toggletrace()
        return salt, script, after, expected

    def _processoutput(self, exitcode, output, salt, after, expected):
        # Merge the script output back into a unified test.
        warnonly = WARN_UNDEFINED  # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = WARN_NO

        pos = -1
        postout = []
        for out_rawline in output:
            out_line, cmd_line = out_rawline, None
            if salt in out_rawline:
                out_line, cmd_line = out_rawline.split(salt, 1)

            pos, postout, warnonly = self._process_out_line(
                out_line, pos, postout, expected, warnonly
            )
            pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)

        if pos in after:
            postout += after.pop(pos)

        if warnonly == WARN_YES:
            exitcode = False  # Set exitcode to warned.

        return exitcode, postout

    def _process_out_line(self, out_line, pos, postout, expected, warnonly):
        while out_line:
            if not out_line.endswith(b'\n'):
                out_line += b' (no-eol)\n'

            # Find the expected output at the current position.
            els = [None]
            if expected.get(pos, None):
                els = expected[pos]

            optional = []
            for i, el in enumerate(els):
                r = False
                if el:
                    r, exact = self.linematch(el, out_line)
                    if isinstance(r, str):
                        if r == '-glob':
                            out_line = ''.join(el.rsplit(' (glob)', 1))
                            r = ''  # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                if r:
                    els.pop(i)
                    break
                if el:
                    if isoptional(el):
                        optional.append(i)
                    else:
                        m = optline.match(el)
                        if m:
                            conditions = [c for c in m.group(2).split(b' ')]

                            if not self._iftest(conditions):
                                optional.append(i)
                    if exact:
                        # Don't allow the line to be matched against a later
                        # line in the output
                        els.pop(i)
                        break

            if r:
                if r == "retry":
                    continue
                # clean up any optional leftovers
                for i in optional:
                    postout.append(b'  ' + els[i])
                for i in reversed(optional):
                    del els[i]
                postout.append(b'  ' + el)
            else:
                if self.NEEDESCAPE(out_line):
                    out_line = TTest._stringescape(
                        b'%s (esc)\n' % out_line.rstrip(b'\n')
                    )
                postout.append(b'  ' + out_line)  # Let diff deal with it.
                if r != '':  # If line failed.
                    warnonly = WARN_NO
                elif warnonly == WARN_UNDEFINED:
                    warnonly = WARN_YES
            break
        else:
            # clean up any optional leftovers
            while expected.get(pos, None):
                el = expected[pos].pop(0)
                if el:
                    if not isoptional(el):
                        m = optline.match(el)
                        if m:
                            conditions = [c for c in m.group(2).split(b' ')]

                            if self._iftest(conditions):
                                # Don't append as optional line
                                continue
                        else:
                            continue
                postout.append(b'  ' + el)
        return pos, postout, warnonly

    def _process_cmd_line(self, cmd_line, pos, postout, after):
        """process a "command" part of a line from unified test output"""
        if cmd_line:
            # Add on last return code.
            ret = int(cmd_line.split()[1])
            if ret != 0:
                postout.append(b'  [%d]\n' % ret)
            if pos in after:
                # Merge in non-active test bits.
                postout += after.pop(pos)
            pos = int(cmd_line.split()[0])
        return pos, postout

    @staticmethod
    def rematch(el, l):
        try:
            # parse any flags at the beginning of the regex. Only 'i' is
            # supported right now, but this should be easy to extend.
            flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
            flags = flags or b''
            el = flags + b'(?:' + el + b')'
            # use \Z to ensure that the regex matches to the end of the string
            if os.name == 'nt':
                return re.match(el + br'\r?\n\Z', l)
            return re.match(el + br'\n\Z', l)
        except re.error:
            # el is an invalid regex
            return False

    @staticmethod
    def globmatch(el, l):
        # The only supported special characters are * and ? plus / which also
        # matches \ on windows. Escaping of these characters is supported.
        if el + b'\n' == l:
            if os.altsep:
                # matching on "/" is not needed for this line
                for pat in checkcodeglobpats:
                    if pat.match(el):
                        return True
                return b'-glob'
            return True
        el = el.replace(b'$LOCALIP', b'*')
        i, n = 0, len(el)
        res = b''
        while i < n:
            c = el[i : i + 1]
            i += 1
            if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
                res += el[i - 1 : i + 1]
                i += 1
            elif c == b'*':
                res += b'.*'
            elif c == b'?':
                res += b'.'
            elif c == b'/' and os.altsep:
                res += b'[/\\\\]'
            else:
                res += re.escape(c)
        return TTest.rematch(res, l)

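    # globmatch() above is what lets an expected line such as
    #   saved backup bundle to $TESTTMP/foo/*.hg (glob)
    # match a concrete path: '*' becomes '.*', '?' becomes '.', and '/'
    # also matches '\' on Windows.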
    def linematch(self, el, l):
        if el == l:  # perfect match (fast)
            return True, True
        retry = False
        if isoptional(el):
            retry = "retry"
            el = el[: -len(MARK_OPTIONAL)] + b"\n"
        else:
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    # listed feature missing, should not match
                    return "retry", False

        if el.endswith(b" (esc)\n"):
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True

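    # linematch() above returns a (result, exact) pair: result may be True,
    # a regex match object, False, or a marker string such as "retry" (an
    # optional or conditional line that did not match); exact=True tells
    # the caller not to retry this expected line against later output.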
2058 @staticmethod
2058 @staticmethod
2059 def parsehghaveoutput(lines):
2059 def parsehghaveoutput(lines):
2060 '''Parse hghave log lines.
2060 '''Parse hghave log lines.
2061
2061
2062 Return tuple of lists (missing, failed):
2062 Return tuple of lists (missing, failed):
2063 * the missing/unknown features
2063 * the missing/unknown features
2064 * the features for which existence check failed'''
2064 * the features for which existence check failed'''
2065 missing = []
2065 missing = []
2066 failed = []
2066 failed = []
2067 for line in lines:
2067 for line in lines:
2068 if line.startswith(TTest.SKIPPED_PREFIX):
2068 if line.startswith(TTest.SKIPPED_PREFIX):
2069 line = line.splitlines()[0]
2069 line = line.splitlines()[0]
2070 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2070 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2071 elif line.startswith(TTest.FAILED_PREFIX):
2071 elif line.startswith(TTest.FAILED_PREFIX):
2072 line = line.splitlines()[0]
2072 line = line.splitlines()[0]
2073 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2073 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2074
2074
2075 return missing, failed
2075 return missing, failed
2076
2076
    @staticmethod
    def _escapef(m):
        return TTest.ESCAPEMAP[m.group(0)]

    @staticmethod
    def _stringescape(s):
        return TTest.ESCAPESUB(TTest._escapef, s)


iolock = threading.RLock()
firstlock = threading.RLock()
firsterror = False


class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""

    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else:  # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """Can be overridden by a custom TestResult"""

    def onEnd(self):
        """Can be overridden by a custom TestResult"""

    def addFailure(self, test, reason):
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                v = self._options.view
                subprocess.call(
                    r'"%s" "%s" "%s"'
                    % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
                    shell=True,
                )
            else:
                servefail, lines = getdiff(
                    expected, got, test.refpath, test.errpath
                )
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport
                    )

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)'
                    )
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

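    # With --interactive, a mismatch above ends in a prompt (the transcript
    # below is a hypothetical sketch); answering "y" renames the .err file
    # over the original .t file, accepting the new output as the reference:
    #
    #     --- $TESTDIR/test-foo.t
    #     +++ $TESTDIR/test-foo.t.err
    #     ...
    #     Accept this change? [n] y
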
    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times() returns the user and system CPU time consumed by child
        # processes, along with the real elapsed time of the process. Its
        # one limitation is that the child-time fields are only populated on
        # Unix-like systems, not on Windows, which is why we fall back to
        # another function for wall-time calculations.
        test.started_times = os.times()
        # TODO use a monotonic clock once support for Python 2.7 is dropped.
        test.started_time = time.time()
        if self._firststarttime is None:  # thread racy but irrelevant
            self._firststarttime = test.started_time

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped_times = os.times()
        stopped_time = time.time()

        starttime = test.started_times
        endtime = test.stopped_times
        origin = self._firststarttime
        self.times.append(
            (
                test.name,
                endtime[2] - starttime[2],  # user space CPU time
                endtime[3] - starttime[3],  # sys space CPU time
                stopped_time - test.started_time,  # real time
                test.started_time - origin,  # start date in run context
                stopped_time - origin,  # end date in run context
            )
        )

        if interrupted:
            with iolock:
                self.stream.writeln(
                    'INTERRUPTED: %s (after %d seconds)'
                    % (test.name, self.times[-1][3])
                )


def getTestResult():
    """
    Returns the relevant test result
    """
    if "CUSTOM_TEST_RESULT" in os.environ:
        testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
        return testresultmodule.TestResult
    else:
        return TestResult


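# A minimal sketch of a module that getTestResult() above could load; the
# module name "customresult" and everything below are assumptions for
# illustration, selected by exporting CUSTOM_TEST_RESULT=customresult with
# customresult.py importable from sys.path. The only contract implied by
# __import__ is a TestResult class whose constructor and hooks match the
# one in this file (including the polyfilled attributes the runner reads):
#
#     # customresult.py (hypothetical)
#     import unittest
#
#     class TestResult(unittest._TextTestResult):
#         def __init__(self, options, *args, **kwargs):
#             super(TestResult, self).__init__(*args, **kwargs)
#             self._options = options
#             self.skipped, self.ignored = [], []
#             self.times, self.successes, self.faildata = [], [], {}
#             self.color = False
#
#         def onStart(self, test):
#             self.stream.writeln('custom: starting run')
#
#         def onEnd(self):
#             self.stream.writeln('custom: run finished')

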
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(
        self,
        testdir,
        jobs=1,
        whitelist=None,
        blacklist=None,
        retest=False,
        keywords=None,
        loop=False,
        runs_per_test=1,
        loadtest=None,
        showchannels=False,
        *args,
        **kwargs
    ):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes keywords that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:

            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test

            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        channels = [""] * self._jobs

        def job(test, result):
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except:  # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            count = 0
            while channels:
                d = '\n%03s ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + ' ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(0.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(
                            target=job, name=test.name, args=(test, result)
                        )
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test being reported incorrectly.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        channels = []

        return result


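# A sketch of the --showchannels display driven by stat() above (the output
# below is hypothetical): one column per job, where each tick prints the
# next character of "=" plus the test name with its "test-" prefix and file
# extension stripped, then dots while the job keeps running:
#
#       0 = =
#       1 c m
#       2 o e
#       3 m r
#       4 m g
#       5 i e
#       6 t .

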
# Save the most recent 5 wall-clock runtimes of each test to a
# human-readable text file named .testtimes. Tests are sorted
# alphabetically, while times for each test are listed from oldest to
# newest.


def loadtimes(outputdir):
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                times.append(
                    (m.group(1), [float(t) for t in m.group(2).split()])
                )
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
    return times


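# Concretely, the .testtimes format read above and written by savetimes()
# below is one test per line: the name, then up to five space-separated
# wall-clock times in seconds, oldest first (example contents hypothetical):
#
#     test-commit.t 1.204 1.187 1.300
#     test-merge.t 4.510 4.622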
def savetimes(outputdir, result):
    saved = dict(loadtimes(outputdir))
    maxruns = 5
    skipped = {str(t[0]) for t in result.skipped}
    for tdata in result.times:
        test, real = tdata[0], tdata[3]
        if test not in skipped:
            ts = saved.setdefault(test, [])
            ts.append(real)
            ts[:] = ts[-maxruns:]

    fd, tmpname = tempfile.mkstemp(
        prefix=b'.testtimes', dir=outputdir, text=True
    )
    with os.fdopen(fd, 'w') as fp:
        for name, ts in sorted(saved.items()):
            fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
    timepath = os.path.join(outputdir, b'.testtimes')
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass


class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        self._runner = runner

        self._result = getTestResult()(
            self._runner.options, self.stream, self.descriptions, self.verbosity
        )

    def listtests(self, test):
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in sorted(
                    self._result.skipped, key=lambda s: s[0].name
                ):
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in sorted(
                self._result.failures, key=lambda f: f[0].name
            ):
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in sorted(
                self._result.errors, key=lambda e: e[0].name
            ):
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed)
            )
            if failed:
                self.stream.writeln(
                    'python hash seed: %s' % os.environ['PYTHONHASHSEED']
                )
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions')
                )

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln(
                    '%d total from %d frames'
                    % (exceptions['total'], len(exceptions['exceptioncounts']))
                )
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln(
                        '%d (%d tests)\t%s: %s (%s - %d total)'
                        % (
                            totalcount,
                            testcount,
                            frame,
                            exc,
                            leasttest,
                            leastcount,
                        )
                    )

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])

        def pread(args):
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(
                args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
            )
            data = p.stdout.read()
            p.wait()
            return data

        for test in tests:
            pread(bisectcmd + ['--reset'])
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
            rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (
                    br'\nThe first (?P<goodbad>bad|good) revision '
                    br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                    br'summary: +(?P<summary>[^\n]+)\n'
                ),
                data,
                (re.MULTILINE | re.DOTALL),
            )
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test
                )
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)'
                % (
                    test,
                    verb,
                    dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore'),
                )
            )

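    # Shape of the bisection above (a hypothetical transcript): for each
    # failing test the harness reruns itself as the bisect command, roughly
    #
    #     hg bisect --reset
    #     hg bisect --bad .
    #     hg bisect --good <known_good_rev>
    #     hg bisect --command 'python run-tests.py  test-foo.t'
    #
    # then scrapes "The first bad revision is: ..." from the output and
    # reports e.g. "test-foo.t broken by abcdef012345 (commit summary)".
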
    def printtimes(self, times):
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
        self.stream.writeln(
            '%-7s %-7s %-7s %-7s %-7s %s'
            % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
        )
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

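    # The report printed above looks roughly like this (numbers are
    # hypothetical); note that the columns are reordered relative to the
    # self.times tuples, and rows are sorted by real (wall-clock) time:
    #
    #     # Producing time report
    #     start   end     cuser   csys    real    Test
    #       0.012   1.216   0.910   0.120   1.204 test-commit.t
    #       0.015   4.637   3.200   0.480   4.622 test-merge.t
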
    @staticmethod
    def _writexunit(result, outf):
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = {t[0]: t[3] for t in result.times}
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('errors', "0")  # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('name', 'run-tests')
        s.setAttribute(
            'skipped', str(len(result.skipped) + len(result.ignored))
        )
        s.setAttribute('tests', str(result.testsRun))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))

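    # Sketch of the xunit file written above (names and values hypothetical):
    #
    #     <?xml version="1.0" encoding="utf-8"?>
    #     <testsuite errors="0" failures="1" name="run-tests" skipped="1" tests="3">
    #       <testcase name="test-commit.t" time="1.204"/>
    #       <testcase name="test-merge.t" time="4.622">
    #         <failure message="output changed" type="output-mismatch">
    #           <![CDATA[--- expected output ... +++ actual output ...]]>
    #         </failure>
    #       </testcase>
    #       <testcase name="test-largefiles.t">
    #         <skipped><![CDATA[missing feature: unzip]]></skipped>
    #       </testcase>
    #     </testsuite>
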
    @staticmethod
    def _writejson(result, outf):
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [
            ('success', ((tc, None) for tc in result.successes)),
            ('failure', result.failures),
            ('skip', result.skipped),
        ]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {
                        'result': res,
                        'time': ('%0.3f' % timesd[tc.name][2]),
                        'cuser': ('%0.3f' % timesd[tc.name][0]),
                        'csys': ('%0.3f' % timesd[tc.name][1]),
                        'start': ('%0.3f' % timesd[tc.name][3]),
                        'end': ('%0.3f' % timesd[tc.name][4]),
                        'diff': diff,
                    }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(
            outcome, sort_keys=True, indent=4, separators=(',', ': ')
        )
        outf.writelines(("testreport =", jsonout))


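# Sketch of the report.json payload written above (values hypothetical).
# The file is not plain JSON: the object is prefixed with "testreport ="
# so it can be consumed as a JavaScript assignment, and keys are sorted:
#
#     testreport ={
#         "test-commit.t": {
#             "csys": "0.120",
#             "cuser": "0.910",
#             "diff": "",
#             "end": "1.216",
#             "result": "success",
#             "start": "0.012",
#             "time": "1.204"
#         }
#     }

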
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests."""
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:

        def sortkey(f):
            f = f['path']
            if f in previoustimes:
                # Use most recent time as estimate
                return -(previoustimes[f][-1])
            else:
                # Default to a rather arbitrary value of 1 second for new tests
                return -1.0

    else:
        # keywords for slow tests
        slow = {
            b'svn': 10,
            b'cvs': 10,
            b'hghave': 10,
            b'largefiles-update': 10,
            b'run-tests': 10,
            b'corruption': 10,
            b'race': 10,
            b'i18n': 10,
            b'check': 100,
            b'gendoc': 100,
            b'contrib-perf': 200,
            b'merge-combination': 100,
        }
        perf = {}

        def sortkey(f):
            # run largest tests first, as they tend to take the longest
            f = f['path']
            try:
                return perf[f]
            except KeyError:
                try:
                    val = -os.stat(f).st_size
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                    perf[f] = -1e9  # file does not exist, tell early
                    return -1e9
                for kw, mul in slow.items():
                    if kw in f:
                        val *= mul
                if f.endswith(b'.py'):
                    val /= 10.0
                perf[f] = val / 1000.0
                return perf[f]

    testdescs.sort(key=sortkey)


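# A quick arithmetic check of the size-based fallback key above (sizes are
# hypothetical): a 50000-byte test-merge-combination.t matches the
# b'merge-combination' keyword, so -50000 * 100 / 1000.0 = -5000.0; a
# 3000-byte test-commit.t matches no keyword and gets -3.0; a 2000-byte
# test-foo.py is additionally divided by 10, giving -0.2. Sorting ascending
# therefore schedules the big, keyword-weighted test first.

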
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]

    def __init__(self):
        self.options = None
        self._hgroot = None
        self._testdir = None
        self._outputdir = None
        self._hgtmp = None
        self._installdir = None
        self._bindir = None
        self._tmpbinddir = None
        self._pythondir = None
        self._coveragefile = None
        self._createdfiles = []
        self._hgcommand = None
        self._hgpath = None
        self._portoffset = 0
        self._ports = {}

    def run(self, args, parser=None):
        """Run the test suite."""
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_sys2bytes(a) for a in options.tests]
            if options.test_list is not None:
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof

                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)

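    # Typical entry point (a sketch mirroring this script's __main__ block,
    # which lies outside this excerpt):
    #
    #     runner = TestRunner()
    #     sys.exit(runner.run(sys.argv[1:]))
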
    def _run(self, testdescs):
        testdir = getcwdb()
        self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                testdir = os.path.join(testdir, pathname)
        self._testdir = osenvironb[b'TESTDIR'] = testdir
        if self.options.outputdir:
            self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
        else:
            self._outputdir = getcwdb()
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # Use a random python hash seed all the time; we generate the
            # randomness ourselves so we know which seed is used.
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _sys2bytes(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)

        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(_sys2bytes(os.sep), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # Force the use of hg.exe instead of relying on MSYS to recognize hg is
        # a python script and feed it to python.exe. Legacy stdio is force
        # enabled by hg.exe, and this is a more realistic way to launch hg
        # anyway.
        if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
            self._hgcommand += b'.exe'

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        fileb = _sys2bytes(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _sys2bytes(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

3079 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3079 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3080 # can run .../tests/run-tests.py test-foo where test-foo
3080 # can run .../tests/run-tests.py test-foo where test-foo
3081 # adds an extension to HGRC. Also include run-test.py directory to
3081 # adds an extension to HGRC. Also include run-test.py directory to
3082 # import modules like heredoctest.
3082 # import modules like heredoctest.
3083 pypath = [self._pythondir, self._testdir, runtestdir]
3083 pypath = [self._pythondir, self._testdir, runtestdir]
3084 # We have to augment PYTHONPATH, rather than simply replacing
3084 # We have to augment PYTHONPATH, rather than simply replacing
3085 # it, in case external libraries are only available via current
3085 # it, in case external libraries are only available via current
3086 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3086 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3087 # are in /opt/subversion.)
3087 # are in /opt/subversion.)
3088 oldpypath = osenvironb.get(IMPL_PATH)
3088 oldpypath = osenvironb.get(IMPL_PATH)
3089 if oldpypath:
3089 if oldpypath:
3090 pypath.append(oldpypath)
3090 pypath.append(oldpypath)
3091 osenvironb[IMPL_PATH] = sepb.join(pypath)
3091 osenvironb[IMPL_PATH] = sepb.join(pypath)
3092
3092
3093 if self.options.pure:
3093 if self.options.pure:
3094 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3094 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3095 os.environ["HGMODULEPOLICY"] = "py"
3095 os.environ["HGMODULEPOLICY"] = "py"
3096
3096
3097 if self.options.allow_slow_tests:
3097 if self.options.allow_slow_tests:
3098 os.environ["HGTEST_SLOW"] = "slow"
3098 os.environ["HGTEST_SLOW"] = "slow"
3099 elif 'HGTEST_SLOW' in os.environ:
3099 elif 'HGTEST_SLOW' in os.environ:
3100 del os.environ['HGTEST_SLOW']
3100 del os.environ['HGTEST_SLOW']
3101
3101
3102 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3102 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3103
3103
3104 if self.options.exceptions:
3104 if self.options.exceptions:
3105 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3105 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3106 try:
3106 try:
3107 os.makedirs(exceptionsdir)
3107 os.makedirs(exceptionsdir)
3108 except OSError as e:
3108 except OSError as e:
3109 if e.errno != errno.EEXIST:
3109 if e.errno != errno.EEXIST:
3110 raise
3110 raise
3111
3111
3112 # Remove all existing exception reports.
3112 # Remove all existing exception reports.
3113 for f in os.listdir(exceptionsdir):
3113 for f in os.listdir(exceptionsdir):
3114 os.unlink(os.path.join(exceptionsdir, f))
3114 os.unlink(os.path.join(exceptionsdir, f))
3115
3115
3116 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3116 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3117 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3117 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3118 self.options.extra_config_opt.append(
3118 self.options.extra_config_opt.append(
3119 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3119 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3120 )
3120 )
3121
3121
3122 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3122 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3123 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3123 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3124 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3124 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3125 vlog("# Using PATH", os.environ["PATH"])
3125 vlog("# Using PATH", os.environ["PATH"])
3126 vlog(
3126 vlog(
3127 "# Using", _bytes2sys(IMPL_PATH), _bytes2sys(osenvironb[IMPL_PATH]),
3127 "# Using", _bytes2sys(IMPL_PATH), _bytes2sys(osenvironb[IMPL_PATH]),
3128 )
3128 )
3129 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3129 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3130
3130
3131 try:
3131 try:
3132 return self._runtests(testdescs) or 0
3132 return self._runtests(testdescs) or 0
3133 finally:
3133 finally:
3134 time.sleep(0.1)
3134 time.sleep(0.1)
3135 self._cleanup()
3135 self._cleanup()
3136
3136
    def findtests(self, args):
        """Finds possible test files from arguments.

        If you wish to inject custom tests into the test harness, this would
        be a good function to monkeypatch or override in a derived class.
        """
        if not args:
            if self.options.changed:
                proc = Popen4(
                    b'hg st --rev "%s" -man0 .'
                    % _sys2bytes(self.options.changed),
                    None,
                    0,
                )
                stdout, stderr = proc.communicate()
                args = stdout.strip(b'\0').split(b'\0')
            else:
                args = os.listdir(b'.')

        expanded_args = []
        for arg in args:
            if os.path.isdir(arg):
                if not arg.endswith(b'/'):
                    arg += b'/'
                expanded_args.extend([arg + a for a in os.listdir(arg)])
            else:
                expanded_args.append(arg)
        args = expanded_args

        testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
        tests = []
        for t in args:
            case = []

            if not (
                os.path.basename(t).startswith(b'test-')
                and (t.endswith(b'.py') or t.endswith(b'.t'))
            ):

                m = testcasepattern.match(os.path.basename(t))
                if m is not None:
                    t_basename, casestr = m.groups()
                    t = os.path.join(os.path.dirname(t), t_basename)
                    if casestr:
                        case = casestr.split(b'#')
                else:
                    continue

            if t.endswith(b'.t'):
                # .t file may contain multiple test cases
                casedimensions = parsettestcases(t)
                if casedimensions:
                    cases = []

                    def addcases(case, casedimensions):
                        if not casedimensions:
                            cases.append(case)
                        else:
                            for c in casedimensions[0]:
                                addcases(case + [c], casedimensions[1:])

                    addcases([], casedimensions)
                    if case and case in cases:
                        cases = [case]
                    elif case:
                        # Ignore invalid cases
                        cases = []
                    else:
                        pass
                    tests += [{'path': t, 'case': c} for c in sorted(cases)]
                else:
                    tests.append({'path': t})
            else:
                tests.append({'path': t})
        return tests
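    # Editor's note (hedged illustration, not part of the upstream file): the
    # "#case" suffix parsed above selects one variant of a multi-case .t test.
    # Assuming a hypothetical test-foo.t that declares two cases via
    # "#testcases simple advanced", findtests() would expand roughly like:
    #
    #   findtests([b'test-foo.t'])
    #   # -> [{'path': b'test-foo.t', 'case': [b'advanced']},
    #   #     {'path': b'test-foo.t', 'case': [b'simple']}]
    #   findtests([b'test-foo.t#simple'])
    #   # -> [{'path': b'test-foo.t', 'case': [b'simple']}]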

    def _runtests(self, testdescs):
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', [])
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        casestr = b'#'.join(desc['case'])
                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
            num_tests = len(tests) * self.options.runs_per_test

            jobs = min(num_tests, self.options.jobs)

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(
                self._testdir,
                jobs=jobs,
                whitelist=self.options.whitelisted,
                blacklist=self.options.blacklist,
                retest=self.options.retest,
                keywords=kws,
                loop=self.options.loop,
                runs_per_test=self.options.runs_per_test,
                showchannels=self.options.showchannels,
                tests=tests,
                loadtest=_reloadtest,
            )
            verbosity = 1
            if self.options.list_tests:
                verbosity = 0
            elif self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                log(
                    'running %d tests using %d parallel processes'
                    % (num_tests, jobs)
                )

                result = runner.run(suite)

            if result.failures or result.errors:
                failed = True

            result.onEnd()

            if self.options.anycoverage:
                self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1

    def _getport(self, count):
        port = self._ports.get(count)  # do we have a cached entry?
        if port is None:
            portneeded = 3
            # above 100 tries we just give up and let the test report failure
            for tries in xrange(100):
                allfree = True
                port = self.options.port + self._portoffset
                for idx in xrange(portneeded):
                    if not checkportisavailable(port + idx):
                        allfree = False
                        break
                self._portoffset += portneeded
                if allfree:
                    break
            self._ports[count] = port
        return port
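    # Editor's note (hedged): each test gets a block of three consecutive
    # ports (portneeded = 3), probed via checkportisavailable() before the
    # base port is cached in self._ports, so repeated calls behave like:
    #
    #   runner._getport(0)  # first free block at self.options.port + offset
    #   runner._getport(0)  # cached, returns the same base port
    #   runner._getport(1)  # probes the next block of three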

    def _gettest(self, testdesc, count):
        """Obtain a Test by looking at its filename.

        Returns a Test instance. The Test may not be runnable if it doesn't
        map to a known type.
        """
        path = testdesc['path']
        lctest = path.lower()
        testcls = Test

        for ext, cls in self.TESTTYPES:
            if lctest.endswith(ext):
                testcls = cls
                break

        refpath = os.path.join(getcwdb(), path)
        tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

        # extra keyword parameters. 'case' is used by .t tests
        kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}

        t = testcls(
            refpath,
            self._outputdir,
            tmpdir,
            keeptmpdir=self.options.keep_tmpdir,
            debug=self.options.debug,
            first=self.options.first,
            timeout=self.options.timeout,
            startport=self._getport(count),
            extraconfigopts=self.options.extra_config_opt,
            shell=self.options.shell,
            hgcommand=self._hgcommand,
            usechg=bool(self.options.with_chg or self.options.chg),
            useipv6=useipv6,
            **kwds
        )
        t.should_reload = True
        return t

    def _cleanup(self):
        """Clean up state from this test invocation."""
        if self.options.keep_tmpdir:
            return

        vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
        shutil.rmtree(self._hgtmp, True)
        for f in self._createdfiles:
            try:
                os.remove(f)
            except OSError:
                pass

    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests."""
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'

        # os.symlink() is a thing with py3 on Windows, but it requires
        # Administrator rights.
        if getattr(os, 'symlink', None) and os.name != 'nt':
            vlog(
                "# Making python executable in test path a symlink to '%s'"
                % sysexecutable
            )
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                if os.readlink(mypython) == sysexecutable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sysexecutable:
                try:
                    os.symlink(sysexecutable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            exedir, exename = os.path.split(sysexecutable)
            vlog(
                "# Modifying search path to find %s as %s in '%s'"
                % (exename, pyexename, exedir)
            )
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)

    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sysexecutable
        if PYTHON3:
            compiler = _sys2bytes(compiler)
            script = _sys2bytes(script)
            exe = _sys2bytes(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (
            b'"%(exe)s" setup.py %(pure)s clean --all'
            b' build %(compiler)s --build-base="%(base)s"'
            b' install --force --prefix="%(prefix)s"'
            b' --install-lib="%(libdir)s"'
            b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
            % {
                b'exe': exe,
                b'pure': pure,
                b'compiler': compiler,
                b'base': os.path.join(self._hgtmp, b"build"),
                b'prefix': self._installdir,
                b'libdir': self._pythondir,
                b'bindir': self._bindir,
                b'nohome': nohome,
                b'logfile': installerrs,
            }
        )

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd.decode("utf-8"))
        if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py's
            # installation layout puts it in bin/ directly. Fix it.
            with open(hgbat, 'rb') as f:
                data = f.read()
            if br'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(
                    br'"%~dp0..\python" "%~dp0hg" %*',
                    b'"%~dp0python" "%~dp0hg" %*',
                )
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            custom = os.path.join(
                osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
            )
            target = os.path.join(self._pythondir, b'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, b'.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            osenvironb[b'COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, b'..', b'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            osenvironb[b'COVERAGE_DIR'] = covdir

    def _checkhglib(self, verb):
        """Ensure that the 'mercurial' package imported by python is
        the one we expect it to be. If not, print a warning to stderr."""
        if (self._bindir == self._pythondir) and (
            self._bindir != self._tmpbindir
        ):
            # The pythondir has been inferred from --with-hg flag.
            # We cannot expect anything sensible here.
            return
        expecthg = os.path.join(self._pythondir, b'mercurial')
        actualhg = self._gethgpath()
        if os.path.abspath(actualhg) != os.path.abspath(expecthg):
            sys.stderr.write(
                'warning: %s with unexpected mercurial lib: %s\n'
                ' (expected %s)\n' % (verb, actualhg, expecthg)
            )

    def _gethgpath(self):
        """Return the path to the mercurial package that is actually found by
        the current Python interpreter."""
        if self._hgpath is not None:
            return self._hgpath

        cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
        cmd = cmd % PYTHON
        if PYTHON3:
            cmd = _bytes2sys(cmd)

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        out, err = p.communicate()

        self._hgpath = out.strip()

        return self._hgpath

    def _installchg(self):
        """Install chg into the test environment"""
        vlog('# Performing temporary installation of CHG')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
            b'make': b'make',  # TODO: switch by option or environment?
            b'prefix': self._installdir,
        }
        cwd = os.path.join(self._hgroot, b'contrib', b'chg')
        vlog("# Running", cmd)
        proc = subprocess.Popen(
            cmd,
            shell=True,
            cwd=cwd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _err = proc.communicate()
        if proc.returncode != 0:
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)

    def _outputcoverage(self):
        """Produce code coverage output."""
        import coverage

        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]

        cov.combine()

        omit = [
            _bytes2sys(os.path.join(x, b'*'))
            for x in [self._bindir, self._testdir]
        ]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)

    def _findprogram(self, program):
        """Search PATH for an executable program"""
        dpb = _sys2bytes(os.defpath)
        sepb = _sys2bytes(os.pathsep)
        for p in osenvironb.get(b'PATH', dpb).split(sepb):
            name = os.path.join(p, program)
            if os.name == 'nt' or os.access(name, os.X_OK):
                return name
        return None

    def _checktools(self):
        """Ensure tools required to run tests are present."""
        for p in self.REQUIREDTOOLS:
            if os.name == 'nt' and not p.endswith(b'.exe'):
                p += b'.exe'
            found = self._findprogram(p)
            p = p.decode("utf-8")
            if found:
                vlog("# Found prerequisite", p, "at", _bytes2sys(found))
            else:
                print("WARNING: Did not find prerequisite tool: %s " % p)


def aggregateexceptions(path):
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for f in os.listdir(path):
        with open(os.path.join(path, f), 'rb') as fh:
            data = fh.read().split(b'\0')
            if len(data) != 5:
                continue

            exc, mainframe, hgframe, hgline, testname = data
            exc = exc.decode('utf-8')
            mainframe = mainframe.decode('utf-8')
            hgframe = hgframe.decode('utf-8')
            hgline = hgline.decode('utf-8')
            testname = testname.decode('utf-8')

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find the test having the fewest failures for each failure.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        fewesttest = None
        fewestcount = 99999999
        for test in sorted(tests):
            if len(failuresbytest[test]) < fewestcount:
                fewesttest = test
                fewestcount = len(failuresbytest[test])

        leastfailing[key] = (fewestcount, fewesttest)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {}
    for key in exceptioncounts:
        combined[key] = (
            exceptioncounts[key],
            len(testsbyfailure[key]),
            leastfailing[key][0],
            leastfailing[key][1],
        )

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
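# Editor's note (hedged usage sketch, not part of the upstream file):
# aggregateexceptions() parses the '\0'-separated records that the
# logexceptions extension writes into HGEXCEPTIONSDIR, so a caller could
# summarize the most frequent crashes roughly like this:
#
#   report = aggregateexceptions(os.path.join(outputdir, b'exceptions'))
#   for key, count in report['exceptioncounts'].most_common(10):
#       hgframe, hgline, exc = key
#       print('%d\t%s\t%s' % (count, exc, hgframe))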


if __name__ == '__main__':
    runner = TestRunner()

    try:
        import msvcrt

        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,394 +1,402 b''
#!/usr/bin/env python
from __future__ import absolute_import, print_function

import hashlib
import os
import random
import shutil
import stat
import struct
import sys
import tempfile
import time
import unittest

import silenttestrunner

# Load the local remotefilelog, not the system one
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
from mercurial.node import nullid
from mercurial import policy

if not policy._packageprefs.get(policy.policy, (False, False))[1]:
    if __name__ == '__main__':
        msg = "skipped: pure module not available with module policy:"
        print(msg, policy.policy, file=sys.stderr)
        sys.exit(80)

from mercurial import (
    pycompat,
    ui as uimod,
)
from hgext.remotefilelog import (
    basepack,
    constants,
    datapack,
)


class datapacktestsbase(object):
    def __init__(self, datapackreader, paramsavailable):
        self.datapackreader = datapackreader
        self.paramsavailable = paramsavailable

    def setUp(self):
        self.tempdirs = []

    def tearDown(self):
        for d in self.tempdirs:
            shutil.rmtree(d)

    def makeTempDir(self):
        tempdir = pycompat.bytestr(tempfile.mkdtemp())
        self.tempdirs.append(tempdir)
        return tempdir

    def getHash(self, content):
        return hashlib.sha1(content).digest()

    def getFakeHash(self):
        return b''.join(
            pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
        )

    def createPack(self, revisions=None, packdir=None):
        if revisions is None:
            revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]

        if packdir is None:
            packdir = self.makeTempDir()

        packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)

        for args in revisions:
            filename, node, base, content = args[0:4]
            # meta is optional
            meta = None
            if len(args) > 4:
                meta = args[4]
            packer.add(filename, node, base, content, metadata=meta)

        path = packer.close()
        return self.datapackreader(path)
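    # Editor's note (hedged): as exercised in the tests below, every revision
    # tuple handed to createPack() has the shape
    # (filename, node, deltabase, content) with an optional fifth metadata
    # dict, so a minimal pack round-trip looks roughly like:
    #
    #   content = b'data'
    #   node = self.getHash(content)
    #   pack = self.createPack([(b'file.txt', node, nullid, content)])
    #   chain = pack.getdeltachain(b'file.txt', node)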

    def _testAddSingle(self, content):
        """Test putting a simple blob into a pack and reading it out.
        """
        filename = b"foo"
        node = self.getHash(content)

        revisions = [(filename, node, nullid, content)]
        pack = self.createPack(revisions)
        if self.paramsavailable:
            self.assertEqual(
                pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
            )

        chain = pack.getdeltachain(filename, node)
        self.assertEqual(content, chain[0][4])
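    # Editor's note (hedged): as the assertions in these tests use it, a
    # getdeltachain() entry unpacks as entry[0] == filename,
    # entry[1] == node, entry[3] == deltabase node and entry[4] == the
    # delta/content itself.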

    def testAddSingle(self):
        self._testAddSingle(b'abcdef')

    def testAddSingleEmpty(self):
        self._testAddSingle(b'')

    def testAddMultiple(self):
        """Test putting multiple unrelated blobs into a pack and reading them
        out.
        """
        revisions = []
        for i in range(10):
            filename = b"foo%d" % i
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, self.getFakeHash(), content))

        pack = self.createPack(revisions)

        for filename, node, base, content in revisions:
            entry = pack.getdelta(filename, node)
            self.assertEqual((content, filename, base, {}), entry)

            chain = pack.getdeltachain(filename, node)
            self.assertEqual(content, chain[0][4])

    def testAddDeltas(self):
        """Test putting multiple delta blobs into a pack and read the chain.
        """
        revisions = []
        filename = b"foo"
        lastnode = nullid
        for i in range(10):
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, lastnode, content))
            lastnode = node

        pack = self.createPack(revisions)

        entry = pack.getdelta(filename, revisions[0][1])
        realvalue = (revisions[0][3], filename, revisions[0][2], {})
        self.assertEqual(entry, realvalue)

        # Test that the chain for the final entry has all the others
        chain = pack.getdeltachain(filename, node)
        for i in range(10):
            content = b"abcdef%d" % i
            self.assertEqual(content, chain[-i - 1][4])

    def testPackMany(self):
        """Pack many related and unrelated objects.
        """
        # Build a random pack file
        revisions = []
        blobs = {}
        random.seed(0)
        for i in range(100):
            filename = b"filename-%d" % i
            filerevs = []
            for j in range(random.randint(1, 100)):
                content = b"content-%d" % j
                node = self.getHash(content)
                lastnode = nullid
                if len(filerevs) > 0:
                    lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
                filerevs.append(node)
                blobs[(filename, node, lastnode)] = content
                revisions.append((filename, node, lastnode, content))

        pack = self.createPack(revisions)

        # Verify the pack contents
        for (filename, node, lastnode), content in sorted(blobs.items()):
            chain = pack.getdeltachain(filename, node)
            for entry in chain:
                expectedcontent = blobs[(entry[0], entry[1], entry[3])]
                self.assertEqual(entry[4], expectedcontent)

    def testPackMetadata(self):
        revisions = []
        for i in range(100):
            filename = b'%d.txt' % i
            content = b'put-something-here \n' * i
            node = self.getHash(content)
            meta = {
                constants.METAKEYFLAG: i ** 4,
                constants.METAKEYSIZE: len(content),
                b'Z': b'random_string',
                b'_': b'\0' * i,
            }
            revisions.append((filename, node, nullid, content, meta))
        pack = self.createPack(revisions)
        for name, node, x, content, origmeta in revisions:
            parsedmeta = pack.getmeta(name, node)
            # flag == 0 should be optimized out
            if origmeta[constants.METAKEYFLAG] == 0:
                del origmeta[constants.METAKEYFLAG]
            self.assertEqual(parsedmeta, origmeta)

    def testGetMissing(self):
        """Test the getmissing() api.
        """
        revisions = []
        filename = b"foo"
        lastnode = nullid
        for i in range(10):
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, lastnode, content))
            lastnode = node

        pack = self.createPack(revisions)

        missing = pack.getmissing([(b"foo", revisions[0][1])])
        self.assertFalse(missing)

        missing = pack.getmissing(
            [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
        )
        self.assertFalse(missing)

        fakenode = self.getFakeHash()
        missing = pack.getmissing(
            [(b"foo", revisions[0][1]), (b"foo", fakenode)]
        )
        self.assertEqual(missing, [(b"foo", fakenode)])

    def testAddThrows(self):
        pack = self.createPack()

        try:
            pack.add(b'filename', nullid, b'contents')
            self.assertTrue(False, "datapack.add should throw")
        except RuntimeError:
            pass

    def testBadVersionThrows(self):
        pack = self.createPack()
        path = pack.path + b'.datapack'
        with open(path, 'rb') as f:
            raw = f.read()
        raw = struct.pack('!B', 255) + raw[1:]
        os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
        with open(path, 'wb+') as f:
            f.write(raw)

        try:
            self.datapackreader(pack.path)
            self.assertTrue(False, "bad version number should have thrown")
        except RuntimeError:
            pass

    def testMissingDeltabase(self):
        fakenode = self.getFakeHash()
        revisions = [(b"filename", fakenode, self.getFakeHash(), b"content")]
        pack = self.createPack(revisions)
        chain = pack.getdeltachain(b"filename", fakenode)
        self.assertEqual(len(chain), 1)

    def testLargePack(self):
        """Test creating and reading from a large pack with more than
        SMALLFANOUTCUTOFF entries. This causes it to use a 2^16 fanout table
        instead."""
        revisions = []
        blobs = {}
        total = basepack.SMALLFANOUTCUTOFF + 1
        for i in pycompat.xrange(total):
            filename = b"filename-%d" % i
            content = filename
            node = self.getHash(content)
            blobs[(filename, node)] = content
            revisions.append((filename, node, nullid, content))

        pack = self.createPack(revisions)
        if self.paramsavailable:
            self.assertEqual(
                pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
            )

        for (filename, node), content in blobs.items():
            actualcontent = pack.getdeltachain(filename, node)[0][4]
            self.assertEqual(actualcontent, content)

    def testPacksCache(self):
        """Test that we remember the most recent packs while fetching the delta
        chain."""

        packdir = self.makeTempDir()
        deltachains = []

        numpacks = 10
        revisionsperpack = 100

        for i in range(numpacks):
            chain = []
            revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")

            for _ in range(revisionsperpack):
                chain.append(revision)
                revision = (
                    b'%d' % i,
                    self.getFakeHash(),
                    revision[1],
                    self.getFakeHash(),
                )

            self.createPack(chain, packdir)
            deltachains.append(chain)

        class testdatapackstore(datapack.datapackstore):
            # Ensures that we are not keeping everything in the cache.
            DEFAULTCACHESIZE = numpacks // 2

        store = testdatapackstore(uimod.ui(), packdir)

        random.shuffle(deltachains)
        for randomchain in deltachains:
            revision = random.choice(randomchain)
            chain = store.getdeltachain(revision[0], revision[1])

            mostrecentpack = next(iter(store.packs), None)
            self.assertEqual(
                mostrecentpack.getdeltachain(revision[0], revision[1]), chain
            )

            self.assertEqual(randomchain.index(revision) + 1, len(chain))
318
326
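    # The leading underscore keeps unittest from collecting the perf test by
    # default (only methods whose names start with "test" are collected);
    # rename it to testIndexPerf to run it. gc is disabled around the timed
    # lookups so collector pauses do not skew the measurements.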
    # perf test off by default since it's slow
    def _testIndexPerf(self):
        random.seed(0)
        print("Multi-get perf test")
        packsizes = [
            100,
            10000,
            100000,
            500000,
            1000000,
            3000000,
        ]
        lookupsizes = [
            10,
            100,
            1000,
            10000,
            100000,
            1000000,
        ]
        for packsize in packsizes:
            revisions = []
            for i in pycompat.xrange(packsize):
                filename = b"filename-%d" % i
                content = b"content-%d" % i
                node = self.getHash(content)
                revisions.append((filename, node, nullid, content))

            path = self.createPack(revisions).path

            # Perf of large multi-get
            import gc

            gc.disable()
            pack = self.datapackreader(path)
            for lookupsize in lookupsizes:
                if lookupsize > packsize:
                    continue
                random.shuffle(revisions)
                findnodes = [(rev[0], rev[1]) for rev in revisions]

                start = time.time()
                pack.getmissing(findnodes[:lookupsize])
                elapsed = time.time() - start
                # Both counts are pre-formatted, right-justified strings, so
                # they must be interpolated with %s rather than %d.
                print(
                    "%s pack %s lookups = %0.04f"
                    % (
                        ('%d' % packsize).rjust(7),
                        ('%d' % lookupsize).rjust(7),
                        elapsed,
                    )
                )

            print("")
            gc.enable()

        # The perf test is meant to produce output, so we always fail the test
        # so the user sees the output.
        raise RuntimeError("perf test always fails")

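# Concrete binding of the shared test base: the second argument to
# datapacktestsbase.__init__ is taken here to be the paramsavailable flag
# consulted by testLargePack; True would mean this datapack implementation
# exposes pack.params for inspection.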
class datapacktests(datapacktestsbase, unittest.TestCase):
    def __init__(self, *args, **kwargs):
        datapacktestsbase.__init__(self, datapack.datapack, True)
        unittest.TestCase.__init__(self, *args, **kwargs)


# TODO:
# datapack store:
# - getmissing
# - GC two packs into one

if __name__ == '__main__':
    if pycompat.iswindows:
        sys.exit(80)  # Skip on Windows
    silenttestrunner.main(__name__)