##// END OF EJS Templates
merge with stable
Martin von Zweigbergk -
r44960:a08bbdf8 merge default
parent child Browse files
Show More
@@ -0,0 +1,91 b''
1 ====================================
2 Testing head checking code: Case E-1
3 ====================================
4
5 Mercurial checks for the introduction of new heads on push. Evolution comes
6 into play to detect if existing branches on the server are being replaced by
7 some of the new ones we push.
8
9 This case is part of a series of tests checking this behavior.
10
11 Category E: case involving changesets on multiple branches
12 TestCase 8: moving a branch to another location
13
14 .. old-state:
15 ..
16 .. * 1-changeset on branch default
17 .. * 1-changeset on branch Z (above default)
18 ..
19 .. new-state:
20 ..
21 .. * 1-changeset on branch default
22 .. * 1-changeset on branch Z (rebased away from A0)
23 ..
24 .. expected-result:
25 ..
26 .. * push allowed
27 ..
28 .. graph-summary:
29 ..
30 .. B ΓΈβ‡ β—” B'
31 .. | |
32 .. A β—” |
33 .. |/
34 .. ●
35
36 $ . $TESTDIR/testlib/push-checkheads-util.sh
37
38 Test setup
39 ----------
40
41 $ mkdir E1
42 $ cd E1
43 $ setuprepos
44 creating basic server and client repo
45 updating to branch default
46 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 $ cd client
48 $ hg branch Z
49 marked working directory as branch Z
50 (branches are permanent and global, did you want a bookmark?)
51 $ mkcommit B0
52 $ hg push --new-branch
53 pushing to $TESTTMP/E1/server
54 searching for changes
55 adding changesets
56 adding manifests
57 adding file changes
58 added 1 changesets with 1 changes to 1 files
59 $ hg up 0
60 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
61 $ hg branch --force Z
62 marked working directory as branch Z
63 $ mkcommit B1
64 created new head
65 $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"`
66 1 new obsolescence markers
67 obsoleted 1 changesets
68 $ hg log -G --hidden
69 @ c98b855401e7 (draft): B1
70 |
71 | x 93e5c1321ece (draft): B0
72 | |
73 | o 8aaa48160adc (draft): A0
74 |/
75 o 1e4be0697311 (public): root
76
77
78 Actual testing
79 --------------
80
81 $ hg push
82 pushing to $TESTTMP/E1/server
83 searching for changes
84 adding changesets
85 adding manifests
86 adding file changes
87 added 1 changesets with 1 changes to 1 files (+1 heads)
88 1 new obsolescence markers
89 obsoleted 1 changesets
90
91 $ cd ../..
@@ -0,0 +1,105 b''
1 ====================================
2 Testing head checking code: Case E-2
3 ====================================
4
5 Mercurial checks for the introduction of new heads on push. Evolution comes
6 into play to detect if existing branches on the server are being replaced by
7 some of the new ones we push.
8
9 This case is part of a series of tests checking this behavior.
10
11 Category E: case involving changesets on multiple branches
12 TestCase 8: moving interleaved branches away from each other
13
14 .. old-state:
15 ..
16 .. * 2-changeset on branch default
17 .. * 1-changeset on branch Z (between the two other)
18 ..
19 .. new-state:
20 ..
21 .. * 2-changeset on branch default, aligned
22 .. * 1-changeset on branch Z (at the same location)
23 ..
24 .. expected-result:
25 ..
26 .. * push allowed
27 ..
28 .. graph-summary:
29 ..
30 .. C ΓΈβ‡ β—” C'
31 .. | |
32 .. B β—” |
33 .. | |
34 .. A ΓΈβ‡ β—” A'
35 .. |/
36 .. ●
37
38 $ . $TESTDIR/testlib/push-checkheads-util.sh
39
40 Test setup
41 ----------
42
43 $ mkdir E1
44 $ cd E1
45 $ setuprepos
46 creating basic server and client repo
47 updating to branch default
48 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ cd client
50 $ hg branch Z
51 marked working directory as branch Z
52 (branches are permanent and global, did you want a bookmark?)
53 $ mkcommit B0
54 $ hg branch default --force
55 marked working directory as branch default
56 $ mkcommit C0
57 created new head
58 $ hg push --new-branch
59 pushing to $TESTTMP/E1/server
60 searching for changes
61 adding changesets
62 adding manifests
63 adding file changes
64 added 2 changesets with 2 changes to 2 files
65 $ hg up 0
66 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
67 $ mkcommit A1
68 created new head
69 $ mkcommit C1
70 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"`
71 1 new obsolescence markers
72 obsoleted 1 changesets
73 2 new orphan changesets
74 $ hg debugobsolete `getid "desc(C0)" ` `getid "desc(C1)"`
75 1 new obsolescence markers
76 obsoleted 1 changesets
77 $ hg log -G --hidden
78 @ 0c76bc104656 (draft): C1
79 |
80 o f6082bc4ffef (draft): A1
81 |
82 | x afc55ba2ce61 (draft): C0
83 | |
84 | * 93e5c1321ece (draft): B0
85 | |
86 | x 8aaa48160adc (draft): A0
87 |/
88 o 1e4be0697311 (public): root
89
90
91 Actual testing
92 --------------
93
94 $ hg push -r 'desc("C1")'
95 pushing to $TESTTMP/E1/server
96 searching for changes
97 adding changesets
98 adding manifests
99 adding file changes
100 added 2 changesets with 2 changes to 2 files (+1 heads)
101 2 new obsolescence markers
102 obsoleted 2 changesets
103 1 new orphan changesets
104
105 $ cd ../..
@@ -0,0 +1,94 b''
1 ====================================
2 Testing head checking code: Case E-3
3 ====================================
4
5 Mercurial checks for the introduction of new heads on push. Evolution comes
6 into play to detect if existing branches on the server are being replaced by
7 some of the new ones we push.
8
9 This case is part of a series of tests checking this behavior.
10
11 Category E: case involving changesets on multiple branches
12 TestCase 8: moving only part of the interleaved branch away, creating 2 heads
13
14 .. old-state:
15 ..
16 .. * 2-changeset on branch default
17 .. * 1-changeset on branch Z (between the two other)
18 ..
19 .. new-state:
20 ..
21 .. * 2-changeset on branch default, one untouched, the other moved
22 .. * 1-changeset on branch Z (at the same location)
23 ..
24 .. expected-result:
25 ..
26 .. * push rejected
27 ..
28 .. graph-summary:
29 ..
30 .. C ΓΈβ‡ β—” C'
31 .. | |
32 .. B β—” |
33 .. | |
34 .. A β—” |
35 .. |/
36 .. ●
37
38 $ . $TESTDIR/testlib/push-checkheads-util.sh
39
40 Test setup
41 ----------
42
43 $ mkdir E1
44 $ cd E1
45 $ setuprepos
46 creating basic server and client repo
47 updating to branch default
48 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ cd client
50 $ hg branch Z
51 marked working directory as branch Z
52 (branches are permanent and global, did you want a bookmark?)
53 $ mkcommit B0
54 $ hg branch default --force
55 marked working directory as branch default
56 $ mkcommit C0
57 created new head
58 $ hg push --new-branch
59 pushing to $TESTTMP/E1/server
60 searching for changes
61 adding changesets
62 adding manifests
63 adding file changes
64 added 2 changesets with 2 changes to 2 files
65 $ hg up 0
66 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
67 $ mkcommit C1
68 created new head
69 $ hg debugobsolete `getid "desc(C0)" ` `getid "desc(C1)"`
70 1 new obsolescence markers
71 obsoleted 1 changesets
72 $ hg log -G --hidden
73 @ dc44c53142f0 (draft): C1
74 |
75 | x afc55ba2ce61 (draft): C0
76 | |
77 | o 93e5c1321ece (draft): B0
78 | |
79 | o 8aaa48160adc (draft): A0
80 |/
81 o 1e4be0697311 (public): root
82
83
84 Actual testing
85 --------------
86
87 $ hg push -r 'desc("C1")'
88 pushing to $TESTTMP/E1/server
89 searching for changes
90 abort: push creates new remote head dc44c53142f0!
91 (merge or see 'hg help push' for details about pushing new heads)
92 [255]
93
94 $ cd ../..
@@ -1,593 +1,598 b''
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 short,
17 17 )
18 18
19 19 from . import (
20 20 bookmarks,
21 21 branchmap,
22 22 error,
23 23 phases,
24 24 pycompat,
25 25 scmutil,
26 26 setdiscovery,
27 27 treediscovery,
28 28 util,
29 29 )
30 30
31 31
32 32 def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
33 33 """Return a tuple (common, anyincoming, heads) used to identify the common
34 34 subset of nodes between repo and remote.
35 35
36 36 "common" is a list of (at least) the heads of the common subset.
37 37 "anyincoming" is testable as a boolean indicating if any nodes are missing
38 38 locally. If remote does not support getbundle, this actually is a list of
39 39 roots of the nodes that would be incoming, to be supplied to
40 40 changegroupsubset. No code except for pull should be relying on this fact
41 41 any longer.
42 42 "heads" is either the supplied heads, or else the remote's heads.
43 43 "ancestorsof" if not None, restrict the discovery to a subset defined by
44 44 these nodes. Changesets outside of this set won't be considered (and
45 45 won't appear in "common")
46 46
47 47 If you pass heads and they are all known locally, the response lists just
48 48 these heads in "common" and in "heads".
49 49
50 50 Please use findcommonoutgoing to compute the set of outgoing nodes to give
51 51 extensions a good hook into outgoing.
52 52 """
53 53
54 54 if not remote.capable(b'getbundle'):
55 55 return treediscovery.findcommonincoming(repo, remote, heads, force)
56 56
57 57 if heads:
58 58 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
59 59 if all(knownnode(h) for h in heads):
60 60 return (heads, False, heads)
61 61
62 62 res = setdiscovery.findcommonheads(
63 63 repo.ui,
64 64 repo,
65 65 remote,
66 66 abortwhenunrelated=not force,
67 67 ancestorsof=ancestorsof,
68 68 )
69 69 common, anyinc, srvheads = res
70 70 return (list(common), anyinc, heads or list(srvheads))
71 71
72 72
73 73 class outgoing(object):
74 74 '''Represents the set of nodes present in a local repo but not in a
75 75 (possibly) remote one.
76 76
77 77 Members:
78 78
79 79 missing is a list of all nodes present in local but not in remote.
80 80 common is a list of all nodes shared between the two repos.
81 81 excluded is the list of missing changeset that shouldn't be sent remotely.
82 82 missingheads is the list of heads of missing.
83 83 commonheads is the list of heads of common.
84 84
85 85 The sets are computed on demand from the heads, unless provided upfront
86 86 by discovery.'''
87 87
88 88 def __init__(
89 89 self, repo, commonheads=None, missingheads=None, missingroots=None
90 90 ):
91 91 # at least one of them must not be set
92 92 assert None in (commonheads, missingroots)
93 93 cl = repo.changelog
94 94 if missingheads is None:
95 95 missingheads = cl.heads()
96 96 if missingroots:
97 97 discbases = []
98 98 for n in missingroots:
99 99 discbases.extend([p for p in cl.parents(n) if p != nullid])
100 100 # TODO remove call to nodesbetween.
101 101 # TODO populate attributes on outgoing instance instead of setting
102 102 # discbases.
103 103 csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
104 104 included = set(csets)
105 105 missingheads = heads
106 106 commonheads = [n for n in discbases if n not in included]
107 107 elif not commonheads:
108 108 commonheads = [nullid]
109 109 self.commonheads = commonheads
110 110 self.missingheads = missingheads
111 111 self._revlog = cl
112 112 self._common = None
113 113 self._missing = None
114 114 self.excluded = []
115 115
116 116 def _computecommonmissing(self):
117 117 sets = self._revlog.findcommonmissing(
118 118 self.commonheads, self.missingheads
119 119 )
120 120 self._common, self._missing = sets
121 121
122 122 @util.propertycache
123 123 def common(self):
124 124 if self._common is None:
125 125 self._computecommonmissing()
126 126 return self._common
127 127
128 128 @util.propertycache
129 129 def missing(self):
130 130 if self._missing is None:
131 131 self._computecommonmissing()
132 132 return self._missing
133 133
134 134
135 135 def findcommonoutgoing(
136 136 repo, other, onlyheads=None, force=False, commoninc=None, portable=False
137 137 ):
138 138 '''Return an outgoing instance to identify the nodes present in repo but
139 139 not in other.
140 140
141 141 If onlyheads is given, only nodes ancestral to nodes in onlyheads
142 142 (inclusive) are included. If you already know the local repo's heads,
143 143 passing them in onlyheads is faster than letting them be recomputed here.
144 144
145 145 If commoninc is given, it must be the result of a prior call to
146 146 findcommonincoming(repo, other, force) to avoid recomputing it here.
147 147
148 148 If portable is given, compute more conservative common and missingheads,
149 149 to make bundles created from the instance more portable.'''
150 150 # declare an empty outgoing object to be filled later
151 151 og = outgoing(repo, None, None)
152 152
153 153 # get common set if not provided
154 154 if commoninc is None:
155 155 commoninc = findcommonincoming(
156 156 repo, other, force=force, ancestorsof=onlyheads
157 157 )
158 158 og.commonheads, _any, _hds = commoninc
159 159
160 160 # compute outgoing
161 161 mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
162 162 if not mayexclude:
163 163 og.missingheads = onlyheads or repo.heads()
164 164 elif onlyheads is None:
165 165 # use visible heads as it should be cached
166 166 og.missingheads = repo.filtered(b"served").heads()
167 167 og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
168 168 else:
169 169 # compute common, missing and exclude secret stuff
170 170 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
171 171 og._common, allmissing = sets
172 172 og._missing = missing = []
173 173 og.excluded = excluded = []
174 174 for node in allmissing:
175 175 ctx = repo[node]
176 176 if ctx.phase() >= phases.secret or ctx.extinct():
177 177 excluded.append(node)
178 178 else:
179 179 missing.append(node)
180 180 if len(missing) == len(allmissing):
181 181 missingheads = onlyheads
182 182 else: # update missing heads
183 183 missingheads = phases.newheads(repo, onlyheads, excluded)
184 184 og.missingheads = missingheads
185 185 if portable:
186 186 # recompute common and missingheads as if -r<rev> had been given for
187 187 # each head of missing, and --base <rev> for each head of the proper
188 188 # ancestors of missing
189 189 og._computecommonmissing()
190 190 cl = repo.changelog
191 191 missingrevs = {cl.rev(n) for n in og._missing}
192 192 og._common = set(cl.ancestors(missingrevs)) - missingrevs
193 193 commonheads = set(og.commonheads)
194 194 og.missingheads = [h for h in og.missingheads if h not in commonheads]
195 195
196 196 return og
197 197
198 198
199 199 def _headssummary(pushop):
200 200 """compute a summary of branch and heads status before and after push
201 201
202 202 return {'branch': ([remoteheads], [newheads],
203 203 [unsyncedheads], [discardedheads])} mapping
204 204
205 205 - branch: the branch name,
206 206 - remoteheads: the list of remote heads known locally
207 207 None if the branch is new,
208 208 - newheads: the new remote heads (known locally) with outgoing pushed,
209 209 - unsyncedheads: the list of remote heads unknown locally,
210 210 - discardedheads: the list of heads made obsolete by the push.
211 211 """
212 212 repo = pushop.repo.unfiltered()
213 213 remote = pushop.remote
214 214 outgoing = pushop.outgoing
215 215 cl = repo.changelog
216 216 headssum = {}
217 217 missingctx = set()
218 218 # A. Create set of branches involved in the push.
219 219 branches = set()
220 220 for n in outgoing.missing:
221 221 ctx = repo[n]
222 222 missingctx.add(ctx)
223 223 branches.add(ctx.branch())
224 224
225 225 with remote.commandexecutor() as e:
226 226 remotemap = e.callcommand(b'branchmap', {}).result()
227 227
228 228 knownnode = cl.hasnode # do not use nodemap until it is filtered
229 229 # A. register remote heads of branches which are in outgoing set
230 230 for branch, heads in pycompat.iteritems(remotemap):
231 231 # don't add head info about branches which we don't have locally
232 232 if branch not in branches:
233 233 continue
234 234 known = []
235 235 unsynced = []
236 236 for h in heads:
237 237 if knownnode(h):
238 238 known.append(h)
239 239 else:
240 240 unsynced.append(h)
241 241 headssum[branch] = (known, list(known), unsynced)
242 242
243 243 # B. add new branch data
244 244 for branch in branches:
245 245 if branch not in headssum:
246 246 headssum[branch] = (None, [], [])
247 247
248 248 # C. Update newmap with outgoing changes.
249 249 # This will possibly add new heads and remove existing ones.
250 250 newmap = branchmap.remotebranchcache(
251 251 (branch, heads[1])
252 252 for branch, heads in pycompat.iteritems(headssum)
253 253 if heads[0] is not None
254 254 )
255 255 newmap.update(repo, (ctx.rev() for ctx in missingctx))
256 256 for branch, newheads in pycompat.iteritems(newmap):
257 257 headssum[branch][1][:] = newheads
258 258 for branch, items in pycompat.iteritems(headssum):
259 259 for l in items:
260 260 if l is not None:
261 261 l.sort()
262 262 headssum[branch] = items + ([],)
263 263
264 264 # If there are no obsstore, no post processing are needed.
265 265 if repo.obsstore:
266 266 torev = repo.changelog.rev
267 267 futureheads = {torev(h) for h in outgoing.missingheads}
268 268 futureheads |= {torev(h) for h in outgoing.commonheads}
269 269 allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
270 270 for branch, heads in sorted(pycompat.iteritems(headssum)):
271 271 remoteheads, newheads, unsyncedheads, placeholder = heads
272 272 result = _postprocessobsolete(pushop, allfuturecommon, newheads)
273 273 headssum[branch] = (
274 274 remoteheads,
275 275 sorted(result[0]),
276 276 unsyncedheads,
277 277 sorted(result[1]),
278 278 )
279 279 return headssum
280 280
281 281
282 282 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
283 283 """Compute branchmapsummary for repo without branchmap support"""
284 284
285 285 # 1-4b. old servers: Check for new topological heads.
286 286 # Construct {old,new}map with branch = None (topological branch).
287 287 # (code based on update)
288 288 knownnode = repo.changelog.hasnode # no nodemap until it is filtered
289 289 oldheads = sorted(h for h in remoteheads if knownnode(h))
290 290 # all nodes in outgoing.missing are children of either:
291 291 # - an element of oldheads
292 292 # - another element of outgoing.missing
293 293 # - nullrev
294 294 # This explains why the new head are very simple to compute.
295 295 r = repo.set(b'heads(%ln + %ln)', oldheads, outgoing.missing)
296 296 newheads = sorted(c.node() for c in r)
297 297 # set some unsynced head to issue the "unsynced changes" warning
298 298 if inc:
299 299 unsynced = [None]
300 300 else:
301 301 unsynced = []
302 302 return {None: (oldheads, newheads, unsynced, [])}
303 303
304 304
305 305 def _nowarnheads(pushop):
306 306 # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
307 307 repo = pushop.repo.unfiltered()
308 308 remote = pushop.remote
309 309 localbookmarks = repo._bookmarks
310 310
311 311 with remote.commandexecutor() as e:
312 312 remotebookmarks = e.callcommand(
313 313 b'listkeys', {b'namespace': b'bookmarks',}
314 314 ).result()
315 315
316 316 bookmarkedheads = set()
317 317
318 318 # internal config: bookmarks.pushing
319 319 newbookmarks = [
320 320 localbookmarks.expandname(b)
321 321 for b in pushop.ui.configlist(b'bookmarks', b'pushing')
322 322 ]
323 323
324 324 for bm in localbookmarks:
325 325 rnode = remotebookmarks.get(bm)
326 326 if rnode and rnode in repo:
327 327 lctx, rctx = repo[localbookmarks[bm]], repo[rnode]
328 328 if bookmarks.validdest(repo, rctx, lctx):
329 329 bookmarkedheads.add(lctx.node())
330 330 else:
331 331 if bm in newbookmarks and bm not in remotebookmarks:
332 332 bookmarkedheads.add(localbookmarks[bm])
333 333
334 334 return bookmarkedheads
335 335
336 336
337 337 def checkheads(pushop):
338 338 """Check that a push won't add any outgoing head
339 339
340 340 raise Abort error and display ui message as needed.
341 341 """
342 342
343 343 repo = pushop.repo.unfiltered()
344 344 remote = pushop.remote
345 345 outgoing = pushop.outgoing
346 346 remoteheads = pushop.remoteheads
347 347 newbranch = pushop.newbranch
348 348 inc = bool(pushop.incoming)
349 349
350 350 # Check for each named branch if we're creating new remote heads.
351 351 # To be a remote head after push, node must be either:
352 352 # - unknown locally
353 353 # - a local outgoing head descended from update
354 354 # - a remote head that's known locally and not
355 355 # ancestral to an outgoing head
356 356 if remoteheads == [nullid]:
357 357 # remote is empty, nothing to check.
358 358 return
359 359
360 360 if remote.capable(b'branchmap'):
361 361 headssum = _headssummary(pushop)
362 362 else:
363 363 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
364 364 pushop.pushbranchmap = headssum
365 365 newbranches = [
366 366 branch
367 367 for branch, heads in pycompat.iteritems(headssum)
368 368 if heads[0] is None
369 369 ]
370 370 # 1. Check for new branches on the remote.
371 371 if newbranches and not newbranch: # new branch requires --new-branch
372 372 branchnames = b', '.join(sorted(newbranches))
373 373 # Calculate how many of the new branches are closed branches
374 374 closedbranches = set()
375 375 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
376 376 if isclosed:
377 377 closedbranches.add(tag)
378 378 closedbranches = closedbranches & set(newbranches)
379 379 if closedbranches:
380 380 errmsg = _(b"push creates new remote branches: %s (%d closed)!") % (
381 381 branchnames,
382 382 len(closedbranches),
383 383 )
384 384 else:
385 385 errmsg = _(b"push creates new remote branches: %s!") % branchnames
386 386 hint = _(b"use 'hg push --new-branch' to create new remote branches")
387 387 raise error.Abort(errmsg, hint=hint)
388 388
389 389 # 2. Find heads that we need not warn about
390 390 nowarnheads = _nowarnheads(pushop)
391 391
392 392 # 3. Check for new heads.
393 393 # If there are more heads after the push than before, a suitable
394 394 # error message, depending on unsynced status, is displayed.
395 395 errormsg = None
396 396 for branch, heads in sorted(pycompat.iteritems(headssum)):
397 397 remoteheads, newheads, unsyncedheads, discardedheads = heads
398 398 # add unsynced data
399 399 if remoteheads is None:
400 400 oldhs = set()
401 401 else:
402 402 oldhs = set(remoteheads)
403 403 oldhs.update(unsyncedheads)
404 404 dhs = None # delta heads, the new heads on branch
405 405 newhs = set(newheads)
406 406 newhs.update(unsyncedheads)
407 407 if unsyncedheads:
408 408 if None in unsyncedheads:
409 409 # old remote, no heads data
410 410 heads = None
411 411 else:
412 412 heads = scmutil.nodesummaries(repo, unsyncedheads)
413 413 if heads is None:
414 414 repo.ui.status(
415 415 _(b"remote has heads that are not known locally\n")
416 416 )
417 417 elif branch is None:
418 418 repo.ui.status(
419 419 _(b"remote has heads that are not known locally: %s\n")
420 420 % heads
421 421 )
422 422 else:
423 423 repo.ui.status(
424 424 _(
425 425 b"remote has heads on branch '%s' that are "
426 426 b"not known locally: %s\n"
427 427 )
428 428 % (branch, heads)
429 429 )
430 430 if remoteheads is None:
431 431 if len(newhs) > 1:
432 432 dhs = list(newhs)
433 433 if errormsg is None:
434 434 errormsg = (
435 435 _(b"push creates new branch '%s' with multiple heads")
436 436 % branch
437 437 )
438 438 hint = _(
439 439 b"merge or"
440 440 b" see 'hg help push' for details about"
441 441 b" pushing new heads"
442 442 )
443 443 elif len(newhs) > len(oldhs):
444 444 # remove bookmarked or existing remote heads from the new heads list
445 445 dhs = sorted(newhs - nowarnheads - oldhs)
446 446 if dhs:
447 447 if errormsg is None:
448 448 if branch not in (b'default', None):
449 449 errormsg = _(
450 450 b"push creates new remote head %s on branch '%s'!"
451 451 ) % (short(dhs[0]), branch,)
452 452 elif repo[dhs[0]].bookmarks():
453 453 errormsg = _(
454 454 b"push creates new remote head %s "
455 455 b"with bookmark '%s'!"
456 456 ) % (short(dhs[0]), repo[dhs[0]].bookmarks()[0])
457 457 else:
458 458 errormsg = _(b"push creates new remote head %s!") % short(
459 459 dhs[0]
460 460 )
461 461 if unsyncedheads:
462 462 hint = _(
463 463 b"pull and merge or"
464 464 b" see 'hg help push' for details about"
465 465 b" pushing new heads"
466 466 )
467 467 else:
468 468 hint = _(
469 469 b"merge or"
470 470 b" see 'hg help push' for details about"
471 471 b" pushing new heads"
472 472 )
473 473 if branch is None:
474 474 repo.ui.note(_(b"new remote heads:\n"))
475 475 else:
476 476 repo.ui.note(_(b"new remote heads on branch '%s':\n") % branch)
477 477 for h in dhs:
478 478 repo.ui.note(b" %s\n" % short(h))
479 479 if errormsg:
480 480 raise error.Abort(errormsg, hint=hint)
481 481
482 482
483 483 def _postprocessobsolete(pushop, futurecommon, candidate_newhs):
484 484 """post process the list of new heads with obsolescence information
485 485
486 486 Exists as a sub-function to contain the complexity and allow extensions to
487 487 experiment with smarter logic.
488 488
489 489 Returns (newheads, discarded_heads) tuple
490 490 """
491 491 # known issue
492 492 #
493 493 # * We "silently" skip processing on all changeset unknown locally
494 494 #
495 495 # * if <nh> is public on the remote, it won't be affected by obsolete
496 496 # marker and a new one is created
497 497
498 498 # define various utilities and containers
499 499 repo = pushop.repo
500 500 unfi = repo.unfiltered()
501 tonode = unfi.changelog.node
502 501 torev = unfi.changelog.index.get_rev
503 502 public = phases.public
504 503 getphase = unfi._phasecache.phase
505 504 ispublic = lambda r: getphase(unfi, r) == public
506 505 ispushed = lambda n: torev(n) in futurecommon
507 506 hasoutmarker = functools.partial(pushingmarkerfor, unfi.obsstore, ispushed)
508 507 successorsmarkers = unfi.obsstore.successors
509 508 newhs = set() # final set of new heads
510 509 discarded = set() # new head of fully replaced branch
511 510
512 511 localcandidate = set() # candidate heads known locally
513 512 unknownheads = set() # candidate heads unknown locally
514 513 for h in candidate_newhs:
515 514 if h in unfi:
516 515 localcandidate.add(h)
517 516 else:
518 517 if successorsmarkers.get(h) is not None:
519 518 msg = (
520 519 b'checkheads: remote head unknown locally has'
521 520 b' local marker: %s\n'
522 521 )
523 522 repo.ui.debug(msg % hex(h))
524 523 unknownheads.add(h)
525 524
526 525 # fast path the simple case
527 526 if len(localcandidate) == 1:
528 527 return unknownheads | set(candidate_newhs), set()
529 528
530 529 # actually process branch replacement
531 530 while localcandidate:
532 531 nh = localcandidate.pop()
532 current_branch = unfi[nh].branch()
533 533 # run this check early to skip the evaluation of the whole branch
534 534 if torev(nh) in futurecommon or ispublic(torev(nh)):
535 535 newhs.add(nh)
536 536 continue
537 537
538 538 # Get all revs/nodes on the branch exclusive to this head
539 539 # (already filtered heads are "ignored"))
540 540 branchrevs = unfi.revs(
541 541 b'only(%n, (%ln+%ln))', nh, localcandidate, newhs
542 542 )
543 branchnodes = [tonode(r) for r in branchrevs]
543
544 branchnodes = []
545 for r in branchrevs:
546 c = unfi[r]
547 if c.branch() == current_branch:
548 branchnodes.append(c.node())
544 549
545 550 # The branch won't be hidden on the remote if
546 551 # * any part of it is public,
547 552 # * any part of it is considered part of the result by previous logic,
548 553 # * if we have no markers to push to obsolete it.
549 554 if (
550 555 any(ispublic(r) for r in branchrevs)
551 556 or any(torev(n) in futurecommon for n in branchnodes)
552 557 or any(not hasoutmarker(n) for n in branchnodes)
553 558 ):
554 559 newhs.add(nh)
555 560 else:
556 561 # note: there is a corner case if there is a merge in the branch.
557 562 # we might end up with -more- heads. However, these heads are not
558 563 # "added" by the push, but more by the "removal" on the remote so I
559 564 # think it is okay to ignore them,
560 565 discarded.add(nh)
561 566 newhs |= unknownheads
562 567 return newhs, discarded
563 568
564 569
565 570 def pushingmarkerfor(obsstore, ispushed, node):
566 571 """true if some markers are to be pushed for node
567 572
568 573 We cannot just look in to the pushed obsmarkers from the pushop because
569 574 discovery might have filtered relevant markers. In addition listing all
570 575 markers relevant to all changesets in the pushed set would be too expensive
571 576 (O(len(repo)))
572 577
573 578 (note: There are cache opportunity in this function. but it would requires
574 579 a two dimensional stack.)
575 580 """
576 581 successorsmarkers = obsstore.successors
577 582 stack = [node]
578 583 seen = set(stack)
579 584 while stack:
580 585 current = stack.pop()
581 586 if ispushed(current):
582 587 return True
583 588 markers = successorsmarkers.get(current, ())
584 589 # markers fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
585 590 for m in markers:
586 591 nexts = m[1] # successors
587 592 if not nexts: # this is a prune marker
588 593 nexts = m[5] or () # parents
589 594 for n in nexts:
590 595 if n not in seen:
591 596 seen.add(n)
592 597 stack.append(n)
593 598 return False
@@ -1,3719 +1,3719 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # run-tests.py - Run a set of tests on Mercurial
4 4 #
5 5 # Copyright 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 # Modifying this script is tricky because it has many modes:
11 11 # - serial (default) vs parallel (-jN, N > 1)
12 12 # - no coverage (default) vs coverage (-c, -C, -s)
13 13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 14 # - tests are a mix of shell scripts and Python scripts
15 15 #
16 16 # If you change this script, it is recommended that you ensure you
17 17 # haven't broken it by running it in various modes with a representative
18 18 # sample of test scripts. For example:
19 19 #
20 20 # 1) serial, no coverage, temp install:
21 21 # ./run-tests.py test-s*
22 22 # 2) serial, no coverage, local hg:
23 23 # ./run-tests.py --local test-s*
24 24 # 3) serial, coverage, temp install:
25 25 # ./run-tests.py -c test-s*
26 26 # 4) serial, coverage, local hg:
27 27 # ./run-tests.py -c --local test-s* # unsupported
28 28 # 5) parallel, no coverage, temp install:
29 29 # ./run-tests.py -j2 test-s*
30 30 # 6) parallel, no coverage, local hg:
31 31 # ./run-tests.py -j2 --local test-s*
32 32 # 7) parallel, coverage, temp install:
33 33 # ./run-tests.py -j2 -c test-s* # currently broken
34 34 # 8) parallel, coverage, local install:
35 35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 36 # 9) parallel, custom tmp dir:
37 37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 38 # 10) parallel, pure, tests that call run-tests:
39 39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 40 #
41 41 # (You could use any subset of the tests: test-s* happens to match
42 42 # enough that it's worth doing parallel runs, few enough that it
43 43 # completes fairly quickly, includes both shell and Python scripts, and
44 44 # includes some scripts that run daemon processes.)
45 45
46 46 from __future__ import absolute_import, print_function
47 47
48 48 import argparse
49 49 import collections
50 50 import difflib
51 51 import distutils.version as version
52 52 import errno
53 53 import json
54 54 import multiprocessing
55 55 import os
56 56 import random
57 57 import re
58 58 import shutil
59 59 import signal
60 60 import socket
61 61 import subprocess
62 62 import sys
63 63 import sysconfig
64 64 import tempfile
65 65 import threading
66 66 import time
67 67 import unittest
68 68 import uuid
69 69 import xml.dom.minidom as minidom
70 70
71 71 try:
72 72 import Queue as queue
73 73 except ImportError:
74 74 import queue
75 75
76 76 try:
77 77 import shlex
78 78
79 79 shellquote = shlex.quote
80 80 except (ImportError, AttributeError):
81 81 import pipes
82 82
83 83 shellquote = pipes.quote
84 84
85 85 processlock = threading.Lock()
86 86
87 87 pygmentspresent = False
88 88 # ANSI color is unsupported prior to Windows 10
89 89 if os.name != 'nt':
90 90 try: # is pygments installed
91 91 import pygments
92 92 import pygments.lexers as lexers
93 93 import pygments.lexer as lexer
94 94 import pygments.formatters as formatters
95 95 import pygments.token as token
96 96 import pygments.style as style
97 97
98 98 pygmentspresent = True
99 99 difflexer = lexers.DiffLexer()
100 100 terminal256formatter = formatters.Terminal256Formatter()
101 101 except ImportError:
102 102 pass
103 103
104 104 if pygmentspresent:
105 105
106 106 class TestRunnerStyle(style.Style):
107 107 default_style = ""
108 108 skipped = token.string_to_tokentype("Token.Generic.Skipped")
109 109 failed = token.string_to_tokentype("Token.Generic.Failed")
110 110 skippedname = token.string_to_tokentype("Token.Generic.SName")
111 111 failedname = token.string_to_tokentype("Token.Generic.FName")
112 112 styles = {
113 113 skipped: '#e5e5e5',
114 114 skippedname: '#00ffff',
115 115 failed: '#7f0000',
116 116 failedname: '#ff0000',
117 117 }
118 118
119 119 class TestRunnerLexer(lexer.RegexLexer):
120 120 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
121 121 tokens = {
122 122 'root': [
123 123 (r'^Skipped', token.Generic.Skipped, 'skipped'),
124 124 (r'^Failed ', token.Generic.Failed, 'failed'),
125 125 (r'^ERROR: ', token.Generic.Failed, 'failed'),
126 126 ],
127 127 'skipped': [
128 128 (testpattern, token.Generic.SName),
129 129 (r':.*', token.Generic.Skipped),
130 130 ],
131 131 'failed': [
132 132 (testpattern, token.Generic.FName),
133 133 (r'(:| ).*', token.Generic.Failed),
134 134 ],
135 135 }
136 136
137 137 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
138 138 runnerlexer = TestRunnerLexer()
139 139
140 140 origenviron = os.environ.copy()
141 141
142 142 if sys.version_info > (3, 5, 0):
143 143 PYTHON3 = True
144 144 xrange = range # we use xrange in one place, and we'd rather not use range
145 145
146 146 def _sys2bytes(p):
147 147 if p is None:
148 148 return p
149 149 return p.encode('utf-8')
150 150
151 151 def _bytes2sys(p):
152 152 if p is None:
153 153 return p
154 154 return p.decode('utf-8')
155 155
156 156 osenvironb = getattr(os, 'environb', None)
157 157 if osenvironb is None:
158 158 # Windows lacks os.environb, for instance. A proxy over the real thing
159 159 # instead of a copy allows the environment to be updated via bytes on
160 160 # all platforms.
161 161 class environbytes(object):
162 162 def __init__(self, strenv):
163 163 self.__len__ = strenv.__len__
164 164 self.clear = strenv.clear
165 165 self._strenv = strenv
166 166
167 167 def __getitem__(self, k):
168 168 v = self._strenv.__getitem__(_bytes2sys(k))
169 169 return _sys2bytes(v)
170 170
171 171 def __setitem__(self, k, v):
172 172 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
173 173
174 174 def __delitem__(self, k):
175 175 self._strenv.__delitem__(_bytes2sys(k))
176 176
177 177 def __contains__(self, k):
178 178 return self._strenv.__contains__(_bytes2sys(k))
179 179
180 180 def __iter__(self):
181 181 return iter([_sys2bytes(k) for k in iter(self._strenv)])
182 182
183 183 def get(self, k, default=None):
184 184 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
185 185 return _sys2bytes(v)
186 186
187 187 def pop(self, k, default=None):
188 188 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
189 189 return _sys2bytes(v)
190 190
191 191 osenvironb = environbytes(os.environ)
192 192
193 193 getcwdb = getattr(os, 'getcwdb')
194 194 if not getcwdb or os.name == 'nt':
195 195 getcwdb = lambda: _sys2bytes(os.getcwd())
196 196
197 197 elif sys.version_info >= (3, 0, 0):
198 198 print(
199 199 '%s is only supported on Python 3.5+ and 2.7, not %s'
200 200 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
201 201 )
202 202 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
203 203 else:
204 204 PYTHON3 = False
205 205
206 206 # In python 2.x, path operations are generally done using
207 207 # bytestrings by default, so we don't have to do any extra
208 208 # fiddling there. We define the wrapper functions anyway just to
209 209 # help keep code consistent between platforms.
210 210 def _sys2bytes(p):
211 211 return p
212 212
213 213 _bytes2sys = _sys2bytes
214 214 osenvironb = os.environ
215 215 getcwdb = os.getcwd
216 216
217 217 # For Windows support
218 218 wifexited = getattr(os, "WIFEXITED", lambda x: False)
219 219
220 220 # Whether to use IPv6
221 221 def checksocketfamily(name, port=20058):
222 222 """return true if we can listen on localhost using family=name
223 223
224 224 name should be either 'AF_INET', or 'AF_INET6'.
225 225 port being used is okay - EADDRINUSE is considered as successful.
226 226 """
227 227 family = getattr(socket, name, None)
228 228 if family is None:
229 229 return False
230 230 try:
231 231 s = socket.socket(family, socket.SOCK_STREAM)
232 232 s.bind(('localhost', port))
233 233 s.close()
234 234 return True
235 235 except socket.error as exc:
236 236 if exc.errno == errno.EADDRINUSE:
237 237 return True
238 238 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
239 239 return False
240 240 else:
241 241 raise
242 242 else:
243 243 return False
244 244
245 245
246 246 # useipv6 will be set by parseargs
247 247 useipv6 = None
248 248
249 249
250 250 def checkportisavailable(port):
251 251 """return true if a port seems free to bind on localhost"""
252 252 if useipv6:
253 253 family = socket.AF_INET6
254 254 else:
255 255 family = socket.AF_INET
256 256 try:
257 257 s = socket.socket(family, socket.SOCK_STREAM)
258 258 s.bind(('localhost', port))
259 259 s.close()
260 260 return True
261 261 except socket.error as exc:
262 262 if exc.errno not in (
263 263 errno.EADDRINUSE,
264 264 errno.EADDRNOTAVAIL,
265 265 errno.EPROTONOSUPPORT,
266 266 ):
267 267 raise
268 268 return False
269 269
270 270
271 271 closefds = os.name == 'posix'
272 272
273 273
274 274 def Popen4(cmd, wd, timeout, env=None):
275 275 processlock.acquire()
276 276 p = subprocess.Popen(
277 277 _bytes2sys(cmd),
278 278 shell=True,
279 279 bufsize=-1,
280 280 cwd=_bytes2sys(wd),
281 281 env=env,
282 282 close_fds=closefds,
283 283 stdin=subprocess.PIPE,
284 284 stdout=subprocess.PIPE,
285 285 stderr=subprocess.STDOUT,
286 286 )
287 287 processlock.release()
288 288
289 289 p.fromchild = p.stdout
290 290 p.tochild = p.stdin
291 291 p.childerr = p.stderr
292 292
293 293 p.timeout = False
294 294 if timeout:
295 295
296 296 def t():
297 297 start = time.time()
298 298 while time.time() - start < timeout and p.returncode is None:
299 299 time.sleep(0.1)
300 300 p.timeout = True
301 301 if p.returncode is None:
302 302 terminate(p)
303 303
304 304 threading.Thread(target=t).start()
305 305
306 306 return p
307 307
308 308
309 309 if sys.executable:
310 310 sysexecutable = sys.executable
311 311 elif os.environ.get('PYTHONEXECUTABLE'):
312 312 sysexecutable = os.environ['PYTHONEXECUTABLE']
313 313 elif os.environ.get('PYTHON'):
314 314 sysexecutable = os.environ['PYTHON']
315 315 else:
316 316 raise AssertionError('Could not find Python interpreter')
317 317
318 318 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
319 319 IMPL_PATH = b'PYTHONPATH'
320 320 if 'java' in sys.platform:
321 321 IMPL_PATH = b'JYTHONPATH'
322 322
323 323 defaults = {
324 324 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
325 325 'timeout': ('HGTEST_TIMEOUT', 180),
326 326 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
327 327 'port': ('HGTEST_PORT', 20059),
328 328 'shell': ('HGTEST_SHELL', 'sh'),
329 329 }
330 330
331 331
332 332 def canonpath(path):
333 333 return os.path.realpath(os.path.expanduser(path))
334 334
335 335
336 336 def parselistfiles(files, listtype, warn=True):
337 337 entries = dict()
338 338 for filename in files:
339 339 try:
340 340 path = os.path.expanduser(os.path.expandvars(filename))
341 341 f = open(path, "rb")
342 342 except IOError as err:
343 343 if err.errno != errno.ENOENT:
344 344 raise
345 345 if warn:
346 346 print("warning: no such %s file: %s" % (listtype, filename))
347 347 continue
348 348
349 349 for line in f.readlines():
350 350 line = line.split(b'#', 1)[0].strip()
351 351 if line:
352 352 entries[line] = filename
353 353
354 354 f.close()
355 355 return entries
356 356
357 357
358 358 def parsettestcases(path):
359 359 """read a .t test file, return a set of test case names
360 360
361 361 If path does not exist, return an empty set.
362 362 """
363 363 cases = []
364 364 try:
365 365 with open(path, 'rb') as f:
366 366 for l in f:
367 367 if l.startswith(b'#testcases '):
368 368 cases.append(sorted(l[11:].split()))
369 369 except IOError as ex:
370 370 if ex.errno != errno.ENOENT:
371 371 raise
372 372 return cases
373 373
374 374
375 375 def getparser():
376 376 """Obtain the OptionParser used by the CLI."""
377 377 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
378 378
379 379 selection = parser.add_argument_group('Test Selection')
380 380 selection.add_argument(
381 381 '--allow-slow-tests',
382 382 action='store_true',
383 383 help='allow extremely slow tests',
384 384 )
385 385 selection.add_argument(
386 386 "--blacklist",
387 387 action="append",
388 388 help="skip tests listed in the specified blacklist file",
389 389 )
390 390 selection.add_argument(
391 391 "--changed",
392 392 help="run tests that are changed in parent rev or working directory",
393 393 )
394 394 selection.add_argument(
395 395 "-k", "--keywords", help="run tests matching keywords"
396 396 )
397 397 selection.add_argument(
398 398 "-r", "--retest", action="store_true", help="retest failed tests"
399 399 )
400 400 selection.add_argument(
401 401 "--test-list",
402 402 action="append",
403 403 help="read tests to run from the specified file",
404 404 )
405 405 selection.add_argument(
406 406 "--whitelist",
407 407 action="append",
408 408 help="always run tests listed in the specified whitelist file",
409 409 )
410 410 selection.add_argument(
411 411 'tests', metavar='TESTS', nargs='*', help='Tests to run'
412 412 )
413 413
414 414 harness = parser.add_argument_group('Test Harness Behavior')
415 415 harness.add_argument(
416 416 '--bisect-repo',
417 417 metavar='bisect_repo',
418 418 help=(
419 419 "Path of a repo to bisect. Use together with " "--known-good-rev"
420 420 ),
421 421 )
422 422 harness.add_argument(
423 423 "-d",
424 424 "--debug",
425 425 action="store_true",
426 426 help="debug mode: write output of test scripts to console"
427 427 " rather than capturing and diffing it (disables timeout)",
428 428 )
429 429 harness.add_argument(
430 430 "-f",
431 431 "--first",
432 432 action="store_true",
433 433 help="exit on the first test failure",
434 434 )
435 435 harness.add_argument(
436 436 "-i",
437 437 "--interactive",
438 438 action="store_true",
439 439 help="prompt to accept changed output",
440 440 )
441 441 harness.add_argument(
442 442 "-j",
443 443 "--jobs",
444 444 type=int,
445 445 help="number of jobs to run in parallel"
446 446 " (default: $%s or %d)" % defaults['jobs'],
447 447 )
448 448 harness.add_argument(
449 449 "--keep-tmpdir",
450 450 action="store_true",
451 451 help="keep temporary directory after running tests",
452 452 )
453 453 harness.add_argument(
454 454 '--known-good-rev',
455 455 metavar="known_good_rev",
456 456 help=(
457 457 "Automatically bisect any failures using this "
458 458 "revision as a known-good revision."
459 459 ),
460 460 )
461 461 harness.add_argument(
462 462 "--list-tests",
463 463 action="store_true",
464 464 help="list tests instead of running them",
465 465 )
466 466 harness.add_argument(
467 467 "--loop", action="store_true", help="loop tests repeatedly"
468 468 )
469 469 harness.add_argument(
470 470 '--random', action="store_true", help='run tests in random order'
471 471 )
472 472 harness.add_argument(
473 473 '--order-by-runtime',
474 474 action="store_true",
475 475 help='run slowest tests first, according to .testtimes',
476 476 )
477 477 harness.add_argument(
478 478 "-p",
479 479 "--port",
480 480 type=int,
481 481 help="port on which servers should listen"
482 482 " (default: $%s or %d)" % defaults['port'],
483 483 )
484 484 harness.add_argument(
485 485 '--profile-runner',
486 486 action='store_true',
487 487 help='run statprof on run-tests',
488 488 )
489 489 harness.add_argument(
490 490 "-R", "--restart", action="store_true", help="restart at last error"
491 491 )
492 492 harness.add_argument(
493 493 "--runs-per-test",
494 494 type=int,
495 495 dest="runs_per_test",
496 496 help="run each test N times (default=1)",
497 497 default=1,
498 498 )
499 499 harness.add_argument(
500 500 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
501 501 )
502 502 harness.add_argument(
503 503 '--showchannels', action='store_true', help='show scheduling channels'
504 504 )
505 505 harness.add_argument(
506 506 "--slowtimeout",
507 507 type=int,
508 508 help="kill errant slow tests after SLOWTIMEOUT seconds"
509 509 " (default: $%s or %d)" % defaults['slowtimeout'],
510 510 )
511 511 harness.add_argument(
512 512 "-t",
513 513 "--timeout",
514 514 type=int,
515 515 help="kill errant tests after TIMEOUT seconds"
516 516 " (default: $%s or %d)" % defaults['timeout'],
517 517 )
518 518 harness.add_argument(
519 519 "--tmpdir",
520 520 help="run tests in the given temporary directory"
521 521 " (implies --keep-tmpdir)",
522 522 )
523 523 harness.add_argument(
524 524 "-v", "--verbose", action="store_true", help="output verbose messages"
525 525 )
526 526
527 527 hgconf = parser.add_argument_group('Mercurial Configuration')
528 528 hgconf.add_argument(
529 529 "--chg",
530 530 action="store_true",
531 531 help="install and use chg wrapper in place of hg",
532 532 )
533 533 hgconf.add_argument("--compiler", help="compiler to build with")
534 534 hgconf.add_argument(
535 535 '--extra-config-opt',
536 536 action="append",
537 537 default=[],
538 538 help='set the given config opt in the test hgrc',
539 539 )
540 540 hgconf.add_argument(
541 541 "-l",
542 542 "--local",
543 543 action="store_true",
544 544 help="shortcut for --with-hg=<testdir>/../hg, "
545 545 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
546 546 )
547 547 hgconf.add_argument(
548 548 "--ipv6",
549 549 action="store_true",
550 550 help="prefer IPv6 to IPv4 for network related tests",
551 551 )
552 552 hgconf.add_argument(
553 553 "--pure",
554 554 action="store_true",
555 555 help="use pure Python code instead of C extensions",
556 556 )
557 557 hgconf.add_argument(
558 558 "--with-chg",
559 559 metavar="CHG",
560 560 help="use specified chg wrapper in place of hg",
561 561 )
562 562 hgconf.add_argument(
563 563 "--with-hg",
564 564 metavar="HG",
565 565 help="test using specified hg script rather than a "
566 566 "temporary installation",
567 567 )
568 568
569 569 reporting = parser.add_argument_group('Results Reporting')
570 570 reporting.add_argument(
571 571 "-C",
572 572 "--annotate",
573 573 action="store_true",
574 574 help="output files annotated with coverage",
575 575 )
576 576 reporting.add_argument(
577 577 "--color",
578 578 choices=["always", "auto", "never"],
579 579 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
580 580 help="colorisation: always|auto|never (default: auto)",
581 581 )
582 582 reporting.add_argument(
583 583 "-c",
584 584 "--cover",
585 585 action="store_true",
586 586 help="print a test coverage report",
587 587 )
588 588 reporting.add_argument(
589 589 '--exceptions',
590 590 action='store_true',
591 591 help='log all exceptions and generate an exception report',
592 592 )
593 593 reporting.add_argument(
594 594 "-H",
595 595 "--htmlcov",
596 596 action="store_true",
597 597 help="create an HTML report of the coverage of the files",
598 598 )
599 599 reporting.add_argument(
600 600 "--json",
601 601 action="store_true",
602 602 help="store test result data in 'report.json' file",
603 603 )
604 604 reporting.add_argument(
605 605 "--outputdir",
606 606 help="directory to write error logs to (default=test directory)",
607 607 )
608 608 reporting.add_argument(
609 609 "-n", "--nodiff", action="store_true", help="skip showing test changes"
610 610 )
611 611 reporting.add_argument(
612 612 "-S",
613 613 "--noskips",
614 614 action="store_true",
615 615 help="don't report skip tests verbosely",
616 616 )
617 617 reporting.add_argument(
618 618 "--time", action="store_true", help="time how long each test takes"
619 619 )
620 620 reporting.add_argument("--view", help="external diff viewer")
621 621 reporting.add_argument(
622 622 "--xunit", help="record xunit results at specified path"
623 623 )
624 624
625 625 for option, (envvar, default) in defaults.items():
626 626 defaults[option] = type(default)(os.environ.get(envvar, default))
627 627 parser.set_defaults(**defaults)
628 628
629 629 return parser
630 630
631 631
632 632 def parseargs(args, parser):
633 633 """Parse arguments with our OptionParser and validate results."""
634 634 options = parser.parse_args(args)
635 635
636 636 # jython is always pure
637 637 if 'java' in sys.platform or '__pypy__' in sys.modules:
638 638 options.pure = True
639 639
640 640 if options.local:
641 641 if options.with_hg or options.with_chg:
642 642 parser.error('--local cannot be used with --with-hg or --with-chg')
643 643 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
644 644 reporootdir = os.path.dirname(testdir)
645 645 pathandattrs = [(b'hg', 'with_hg')]
646 646 if options.chg:
647 647 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
648 648 for relpath, attr in pathandattrs:
649 649 binpath = os.path.join(reporootdir, relpath)
650 650 if os.name != 'nt' and not os.access(binpath, os.X_OK):
651 651 parser.error(
652 652 '--local specified, but %r not found or '
653 653 'not executable' % binpath
654 654 )
655 655 setattr(options, attr, _bytes2sys(binpath))
656 656
657 657 if options.with_hg:
658 658 options.with_hg = canonpath(_sys2bytes(options.with_hg))
659 659 if not (
660 660 os.path.isfile(options.with_hg)
661 661 and os.access(options.with_hg, os.X_OK)
662 662 ):
663 663 parser.error('--with-hg must specify an executable hg script')
664 664 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
665 665 sys.stderr.write('warning: --with-hg should specify an hg script\n')
666 666 sys.stderr.flush()
667 667
668 668 if (options.chg or options.with_chg) and os.name == 'nt':
669 669 parser.error('chg does not work on %s' % os.name)
670 670 if options.with_chg:
671 671 options.chg = False # no installation to temporary location
672 672 options.with_chg = canonpath(_sys2bytes(options.with_chg))
673 673 if not (
674 674 os.path.isfile(options.with_chg)
675 675 and os.access(options.with_chg, os.X_OK)
676 676 ):
677 677 parser.error('--with-chg must specify a chg executable')
678 678 if options.chg and options.with_hg:
679 679 # chg shares installation location with hg
680 680 parser.error(
681 681 '--chg does not work when --with-hg is specified '
682 682 '(use --with-chg instead)'
683 683 )
684 684
685 685 if options.color == 'always' and not pygmentspresent:
686 686 sys.stderr.write(
687 687 'warning: --color=always ignored because '
688 688 'pygments is not installed\n'
689 689 )
690 690
691 691 if options.bisect_repo and not options.known_good_rev:
692 692 parser.error("--bisect-repo cannot be used without --known-good-rev")
693 693
694 694 global useipv6
695 695 if options.ipv6:
696 696 useipv6 = checksocketfamily('AF_INET6')
697 697 else:
698 698 # only use IPv6 if IPv4 is unavailable and IPv6 is available
699 699 useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
700 700 'AF_INET6'
701 701 )
702 702
703 703 options.anycoverage = options.cover or options.annotate or options.htmlcov
704 704 if options.anycoverage:
705 705 try:
706 706 import coverage
707 707
708 708 covver = version.StrictVersion(coverage.__version__).version
709 709 if covver < (3, 3):
710 710 parser.error('coverage options require coverage 3.3 or later')
711 711 except ImportError:
712 712 parser.error('coverage options now require the coverage package')
713 713
714 714 if options.anycoverage and options.local:
715 715 # this needs some path mangling somewhere, I guess
716 716 parser.error(
717 717 "sorry, coverage options do not work when --local " "is specified"
718 718 )
719 719
720 720 if options.anycoverage and options.with_hg:
721 721 parser.error(
722 722 "sorry, coverage options do not work when --with-hg " "is specified"
723 723 )
724 724
725 725 global verbose
726 726 if options.verbose:
727 727 verbose = ''
728 728
729 729 if options.tmpdir:
730 730 options.tmpdir = canonpath(options.tmpdir)
731 731
732 732 if options.jobs < 1:
733 733 parser.error('--jobs must be positive')
734 734 if options.interactive and options.debug:
735 735 parser.error("-i/--interactive and -d/--debug are incompatible")
736 736 if options.debug:
737 737 if options.timeout != defaults['timeout']:
738 738 sys.stderr.write('warning: --timeout option ignored with --debug\n')
739 739 if options.slowtimeout != defaults['slowtimeout']:
740 740 sys.stderr.write(
741 741 'warning: --slowtimeout option ignored with --debug\n'
742 742 )
743 743 options.timeout = 0
744 744 options.slowtimeout = 0
745 745
746 746 if options.blacklist:
747 747 options.blacklist = parselistfiles(options.blacklist, 'blacklist')
748 748 if options.whitelist:
749 749 options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
750 750 else:
751 751 options.whitelisted = {}
752 752
753 753 if options.showchannels:
754 754 options.nodiff = True
755 755
756 756 return options
757 757
758 758
759 759 def rename(src, dst):
760 760 """Like os.rename(), trade atomicity and opened files friendliness
761 761 for existing destination support.
762 762 """
763 763 shutil.copy(src, dst)
764 764 os.remove(src)
765 765
766 766
767 767 def makecleanable(path):
768 768 """Try to fix directory permission recursively so that the entire tree
769 769 can be deleted"""
770 770 for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
771 771 for d in dirnames:
772 772 p = os.path.join(dirpath, d)
773 773 try:
774 774 os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700) # chmod u+rwx
775 775 except OSError:
776 776 pass
777 777
778 778
779 779 _unified_diff = difflib.unified_diff
780 780 if PYTHON3:
781 781 import functools
782 782
783 783 _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
784 784
785 785
786 786 def getdiff(expected, output, ref, err):
787 787 servefail = False
788 788 lines = []
789 789 for line in _unified_diff(expected, output, ref, err):
790 790 if line.startswith(b'+++') or line.startswith(b'---'):
791 791 line = line.replace(b'\\', b'/')
792 792 if line.endswith(b' \n'):
793 793 line = line[:-2] + b'\n'
794 794 lines.append(line)
795 795 if not servefail and line.startswith(
796 796 b'+ abort: child process failed to start'
797 797 ):
798 798 servefail = True
799 799
800 800 return servefail, lines
801 801
802 802
803 803 verbose = False
804 804
805 805
806 806 def vlog(*msg):
807 807 """Log only when in verbose mode."""
808 808 if verbose is False:
809 809 return
810 810
811 811 return log(*msg)
812 812
813 813
814 814 # Bytes that break XML even in a CDATA block: control characters 0-31
815 815 # sans \t, \n and \r
816 816 CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")
817 817
818 818 # Match feature conditionalized output lines in the form, capturing the feature
819 819 # list in group 2, and the preceeding line output in group 1:
820 820 #
821 821 # output..output (feature !)\n
822 822 optline = re.compile(br'(.*) \((.+?) !\)\n$')
823 823
824 824
825 825 def cdatasafe(data):
826 826 """Make a string safe to include in a CDATA block.
827 827
828 828 Certain control characters are illegal in a CDATA block, and
829 829 there's no way to include a ]]> in a CDATA either. This function
830 830 replaces illegal bytes with ? and adds a space between the ]] so
831 831 that it won't break the CDATA block.
832 832 """
833 833 return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
834 834
835 835
836 836 def log(*msg):
837 837 """Log something to stdout.
838 838
839 839 Arguments are strings to print.
840 840 """
841 841 with iolock:
842 842 if verbose:
843 843 print(verbose, end=' ')
844 844 for m in msg:
845 845 print(m, end=' ')
846 846 print()
847 847 sys.stdout.flush()
848 848
849 849
850 850 def highlightdiff(line, color):
851 851 if not color:
852 852 return line
853 853 assert pygmentspresent
854 854 return pygments.highlight(
855 855 line.decode('latin1'), difflexer, terminal256formatter
856 856 ).encode('latin1')
857 857
858 858
859 859 def highlightmsg(msg, color):
860 860 if not color:
861 861 return msg
862 862 assert pygmentspresent
863 863 return pygments.highlight(msg, runnerlexer, runnerformatter)
864 864
865 865
866 866 def terminate(proc):
867 867 """Terminate subprocess"""
868 868 vlog('# Terminating process %d' % proc.pid)
869 869 try:
870 870 proc.terminate()
871 871 except OSError:
872 872 pass
873 873
874 874
875 875 def killdaemons(pidfile):
876 876 import killdaemons as killmod
877 877
878 878 return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
879 879
880 880
881 881 class Test(unittest.TestCase):
882 882 """Encapsulates a single, runnable test.
883 883
884 884 While this class conforms to the unittest.TestCase API, it differs in that
885 885 instances need to be instantiated manually. (Typically, unittest.TestCase
886 886 classes are instantiated automatically by scanning modules.)
887 887 """
888 888
889 889 # Status code reserved for skipped tests (used by hghave).
890 890 SKIPPED_STATUS = 80
891 891
892 892 def __init__(
893 893 self,
894 894 path,
895 895 outputdir,
896 896 tmpdir,
897 897 keeptmpdir=False,
898 898 debug=False,
899 899 first=False,
900 900 timeout=None,
901 901 startport=None,
902 902 extraconfigopts=None,
903 903 shell=None,
904 904 hgcommand=None,
905 905 slowtimeout=None,
906 906 usechg=False,
907 907 useipv6=False,
908 908 ):
909 909 """Create a test from parameters.
910 910
911 911 path is the full path to the file defining the test.
912 912
913 913 tmpdir is the main temporary directory to use for this test.
914 914
915 915 keeptmpdir determines whether to keep the test's temporary directory
916 916 after execution. It defaults to removal (False).
917 917
918 918 debug mode will make the test execute verbosely, with unfiltered
919 919 output.
920 920
921 921 timeout controls the maximum run time of the test. It is ignored when
922 922 debug is True. See slowtimeout for tests with #require slow.
923 923
924 924 slowtimeout overrides timeout if the test has #require slow.
925 925
926 926 startport controls the starting port number to use for this test. Each
927 927 test will reserve 3 port numbers for execution. It is the caller's
928 928 responsibility to allocate a non-overlapping port range to Test
929 929 instances.
930 930
931 931 extraconfigopts is an iterable of extra hgrc config options. Values
932 932 must have the form "key=value" (something understood by hgrc). Values
933 933 of the form "foo.key=value" will result in "[foo] key=value".
934 934
935 935 shell is the shell to execute tests in.
936 936 """
937 937 if timeout is None:
938 938 timeout = defaults['timeout']
939 939 if startport is None:
940 940 startport = defaults['port']
941 941 if slowtimeout is None:
942 942 slowtimeout = defaults['slowtimeout']
943 943 self.path = path
944 944 self.bname = os.path.basename(path)
945 945 self.name = _bytes2sys(self.bname)
946 946 self._testdir = os.path.dirname(path)
947 947 self._outputdir = outputdir
948 948 self._tmpname = os.path.basename(path)
949 949 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
950 950
951 951 self._threadtmp = tmpdir
952 952 self._keeptmpdir = keeptmpdir
953 953 self._debug = debug
954 954 self._first = first
955 955 self._timeout = timeout
956 956 self._slowtimeout = slowtimeout
957 957 self._startport = startport
958 958 self._extraconfigopts = extraconfigopts or []
959 959 self._shell = _sys2bytes(shell)
960 960 self._hgcommand = hgcommand or b'hg'
961 961 self._usechg = usechg
962 962 self._useipv6 = useipv6
963 963
964 964 self._aborted = False
965 965 self._daemonpids = []
966 966 self._finished = None
967 967 self._ret = None
968 968 self._out = None
969 969 self._skipped = None
970 970 self._testtmp = None
971 971 self._chgsockdir = None
972 972
973 973 self._refout = self.readrefout()
974 974
975 975 def readrefout(self):
976 976 """read reference output"""
977 977 # If we're not in --debug mode and reference output file exists,
978 978 # check test output against it.
979 979 if self._debug:
980 980 return None # to match "out is None"
981 981 elif os.path.exists(self.refpath):
982 982 with open(self.refpath, 'rb') as f:
983 983 return f.read().splitlines(True)
984 984 else:
985 985 return []
986 986
    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        # unittest.TestCase.__repr__ reads this attribute; expose the test
        # name under the name the base class expects.
        return self.name
991 991
    def __str__(self):
        """Return the human-readable test name."""
        return self.name
994 994
    def shortDescription(self):
        """Return the test name for unittest progress/failure reporting."""
        return self.name
997 997
    def setUp(self):
        """Tasks to perform before run()."""
        # Reset per-run state so a Test instance can be re-run cleanly.
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            # The per-thread temp dir may already exist (shared between
            # tests on the same thread); anything else is a real error.
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            # Socket directory for chg servers; deleting it in tearDown()
            # is what signals the servers to exit.
            self._chgsockdir = os.path.join(
                self._threadtmp, b'%s.chgsock' % name
            )
            os.mkdir(self._chgsockdir)
1031 1031
    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # setUp failure is an error, not a test failure.
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    # The result object accepted the change (e.g. -i),
                    # so the test is treated as a success.
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                # A tearDown error demotes an otherwise passing test.
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)
1086 1086
    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            # Negative return codes mean death by signal (POSIX convention).
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None:  # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (
                (ret != 0 or out != self._refout)
                and not self._skipped
                and not self._debug
            ):
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            # firstlock serializes mismatch reporting across threads.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))
1158 1158
    def tearDown(self):
        """Tasks to perform after run()."""
        # Make sure no daemons started by the test outlive it.
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log(
                '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
                % (_bytes2sys(self._testtmp), _bytes2sys(self._threadtmp),)
            )
        else:
            try:
                shutil.rmtree(self._testtmp)
            except OSError:
                # unreadable directory may be left in $TESTTMP; fix permission
                # and try again
                makecleanable(self._testtmp)
                shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        # Persist failing output so it can be inspected/diffed later.
        if (
            (self._ret != 0 or self._out != self._refout)
            and not self._skipped
            and not self._debug
            and self._out
        ):
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)
1196 1196
    def _run(self, env):
        """Execute the test with environment `env`; return (exitcode, output).

        Subclasses (PythonTest, TTest) override this; the base class has no
        runnable test body, so an unknown test type is reported as a skip.
        """
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')
1200 1200
    def abort(self):
        """Terminate execution of this test."""
        # _run() implementations check this flag and raise KeyboardInterrupt
        # once the running command returns.
        self._aborted = True
1204 1204
1205 1205 def _portmap(self, i):
1206 1206 offset = b'' if i == 0 else b'%d' % i
1207 1207 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1208 1208
    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.

        Returns a list of (regex, replacement) byte-string pairs.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))

        # Optional per-test-suite file contributing extra substitutions.
        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step help with debugging
                # NOTE(review): executes trusted repo-local code; it is
                # expected to define a 'substitutions' sequence of pairs.
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r
1241 1241
1242 1242 def _escapepath(self, p):
1243 1243 if os.name == 'nt':
1244 1244 return b''.join(
1245 1245 c.isalpha()
1246 1246 and b'[%s%s]' % (c.lower(), c.upper())
1247 1247 or c in b'/\\'
1248 1248 and br'[/\\]'
1249 1249 or c.isdigit()
1250 1250 and c
1251 1251 or b'\\' + c
1252 1252 for c in [p[i : i + 1] for i in range(len(p))]
1253 1253 )
1254 1254 else:
1255 1255 return re.escape(p)
1256 1256
1257 1257 def _localip(self):
1258 1258 if self._useipv6:
1259 1259 return b'::1'
1260 1260 else:
1261 1261 return b'127.0.0.1'
1262 1262
    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                # Variables the test harness added did not exist before;
                # restoring them means unsetting them.
                envf.write('unset %s\n' % (name,))
1291 1291
1292 1292 def _getenv(self):
1293 1293 """Obtain environment variables to use during test execution."""
1294 1294
1295 1295 def defineport(i):
1296 1296 offset = '' if i == 0 else '%s' % i
1297 1297 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1298 1298
1299 1299 env = os.environ.copy()
1300 1300 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1301 1301 env['HGEMITWARNINGS'] = '1'
1302 1302 env['TESTTMP'] = _bytes2sys(self._testtmp)
1303 1303 env['TESTNAME'] = self.name
1304 1304 env['HOME'] = _bytes2sys(self._testtmp)
1305 1305 # This number should match portneeded in _getport
1306 1306 for port in xrange(3):
1307 1307 # This list should be parallel to _portmap in _getreplacements
1308 1308 defineport(port)
1309 1309 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1310 1310 env["DAEMON_PIDS"] = _bytes2sys(
1311 1311 os.path.join(self._threadtmp, b'daemon.pids')
1312 1312 )
1313 1313 env["HGEDITOR"] = (
1314 1314 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1315 1315 )
1316 1316 env["HGUSER"] = "test"
1317 1317 env["HGENCODING"] = "ascii"
1318 1318 env["HGENCODINGMODE"] = "strict"
1319 1319 env["HGHOSTNAME"] = "test-hostname"
1320 1320 env['HGIPV6'] = str(int(self._useipv6))
1321 1321 # See contrib/catapipe.py for how to use this functionality.
1322 1322 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1323 1323 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1324 1324 # non-test one in as a default, otherwise set to devnull
1325 1325 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1326 1326 'HGCATAPULTSERVERPIPE', os.devnull
1327 1327 )
1328 1328
1329 1329 extraextensions = []
1330 1330 for opt in self._extraconfigopts:
1331 1331 section, key = _sys2bytes(opt).split(b'.', 1)
1332 1332 if section != 'extensions':
1333 1333 continue
1334 1334 name = key.split(b'=', 1)[0]
1335 1335 extraextensions.append(name)
1336 1336
1337 1337 if extraextensions:
1338 1338 env['HGTESTEXTRAEXTENSIONS'] = b' '.join(extraextensions)
1339 1339
1340 1340 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1341 1341 # IP addresses.
1342 1342 env['LOCALIP'] = _bytes2sys(self._localip())
1343 1343
1344 1344 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1345 1345 # but this is needed for testing python instances like dummyssh,
1346 1346 # dummysmtpd.py, and dumbhttp.py.
1347 1347 if PYTHON3 and os.name == 'nt':
1348 1348 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1349 1349
1350 1350 # Modified HOME in test environment can confuse Rust tools. So set
1351 1351 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1352 1352 # present and these variables aren't already defined.
1353 1353 cargo_home_path = os.path.expanduser('~/.cargo')
1354 1354 rustup_home_path = os.path.expanduser('~/.rustup')
1355 1355
1356 1356 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1357 1357 env['CARGO_HOME'] = cargo_home_path
1358 1358 if (
1359 1359 os.path.exists(rustup_home_path)
1360 1360 and b'RUSTUP_HOME' not in osenvironb
1361 1361 ):
1362 1362 env['RUSTUP_HOME'] = rustup_home_path
1363 1363
1364 1364 # Reset some environment variables to well-known values so that
1365 1365 # the tests produce repeatable output.
1366 1366 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1367 1367 env['TZ'] = 'GMT'
1368 1368 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1369 1369 env['COLUMNS'] = '80'
1370 1370 env['TERM'] = 'xterm'
1371 1371
1372 1372 dropped = [
1373 1373 'CDPATH',
1374 1374 'CHGDEBUG',
1375 1375 'EDITOR',
1376 1376 'GREP_OPTIONS',
1377 1377 'HG',
1378 1378 'HGMERGE',
1379 1379 'HGPLAIN',
1380 1380 'HGPLAINEXCEPT',
1381 1381 'HGPROF',
1382 1382 'http_proxy',
1383 1383 'no_proxy',
1384 1384 'NO_PROXY',
1385 1385 'PAGER',
1386 1386 'VISUAL',
1387 1387 ]
1388 1388
1389 1389 for k in dropped:
1390 1390 if k in env:
1391 1391 del env[k]
1392 1392
1393 1393 # unset env related to hooks
1394 1394 for k in list(env):
1395 1395 if k.startswith('HG_'):
1396 1396 del env[k]
1397 1397
1398 1398 if self._usechg:
1399 1399 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1400 1400
1401 1401 return env
1402 1402
    def _createhgrc(self, path):
        """Create an hgrc file for this test.

        Writes the baseline configuration every test runs with, then appends
        any "section.key=value" options supplied via --extra-config-opt.
        """
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'merge = internal:merge\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            # Keep caches under the per-test tmp dir, not the real $HOME.
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/largefiles'))
            )
            hgrc.write(b'[lfs]\n')
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/lfs'))
            )
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %r\n' % self._useipv6)
            hgrc.write(b'server-header = testing stub value\n')

            for opt in self._extraconfigopts:
                section, key = _sys2bytes(opt).split(b'.', 1)
                assert b'=' in key, (
                    'extra config opt %s must ' 'have an = for assignment' % opt
                )
                hgrc.write(b'[%s]\n%s\n' % (section, key))
1437 1437
    def fail(self, msg):
        """Fail this test with message `msg` (failed, not errored)."""
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)
1442 1442
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # Debug mode: let output flow straight to the terminal.
            proc = subprocess.Popen(
                _bytes2sys(cmd),
                shell=True,
                cwd=_bytes2sys(self._testtmp),
                env=env,
            )
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)

        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                # Report the termination as a SIGTERM death (wait()-style
                # encoding) rather than success.
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Normalize ports/paths/etc. so output is comparable to the
        # reference output.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)
1495 1495
1496 1496
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # Expected output lives next to the test as <testname>.out.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Quote the python(3) executable for Windows
        cmd = b'"%s" "%s"' % (PYTHON, self.path)
        vlog("# Running", cmd.decode("utf-8"))
        # Windows children emit \r\n; normalize so comparisons against the
        # reference output work.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1514 1514
1515 1515
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]
1529 1529
bchr = chr
if PYTHON3:
    # Indexing bytes yields ints on Python 3; bchr turns an int code point
    # back into a length-1 bytes object (chr already does so on Python 2).
    bchr = lambda x: bytes([x])

# Tri-state used while merging output: should a mismatch be only a warning?
WARN_UNDEFINED = 1  # not decided yet
WARN_YES = 2  # mismatch is tolerable (warn only)
WARN_NO = 3  # mismatch is a hard failure

# Trailing marker making an expected-output line optional.
MARK_OPTIONAL = b" (?)\n"
1539 1539
1540 1540
def isoptional(line):
    """Return True if expected-output `line` carries the optional marker."""
    return line.endswith(MARK_OPTIONAL)
1543 1543
1544 1544
class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    # Prefixes emitted in test output for skip / failed-check reasons
    # (consumed by parsehghaveoutput — defined outside this view).
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that require (esc)-escaping when recording output.
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    # Map every byte to its \xNN escape; backslash and \r get readable forms.
    ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1555 1555
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        # All test-case names declared anywhere in the .t file.
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the name, .err path, and tmp dir unique per case combo.
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath
        # Cache of hghave results, keyed by joined requirement string.
        self._have = {}
1568 1568
    @property
    def refpath(self):
        # A .t file is its own reference output.
        return os.path.join(self._testdir, self.bname)
1572 1572
    def _run(self, env):
        """Translate the .t file into a shell script, run it, and merge the
        output back with the expected output; return (exitcode, output)."""
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd.decode("utf-8"))

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)
1605 1605
    def _hghave(self, reqs):
        """Check hghave requirements `reqs`; return (available, message).

        Results are cached per space-joined requirement string.  A return
        code of 2 from hghave means hghave itself failed, which aborts the
        entire run.
        """
        allreqs = b' '.join(reqs)

        self._detectslow(reqs)

        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = os.path.abspath(os.path.dirname(_sys2bytes(__file__)))
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(
            b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
            self._testtmp,
            0,
            self._getenv(),
        )
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            # hghave itself is broken/misused; abort the whole test run.
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        self._have[allreqs] = (True, None)
        return True, None
1637 1637
1638 1638 def _detectslow(self, reqs):
1639 1639 """update the timeout of slow test when appropriate"""
1640 1640 if b'slow' in reqs:
1641 1641 self._timeout = self._slowtimeout
1642 1642
    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                # "no-<case>": true unless this run IS that test case.
                if arg[3:] in self._case:
                    return False
            elif arg in self._allcases:
                # "<case>": true only when this run is that test case.
                if arg not in self._case:
                    return False
            else:
                # Anything else is treated as an hghave feature requirement.
                reqs.append(arg)
        self._detectslow(reqs)
        return self._hghave(reqs)[0]
1657 1657
    def _parsetest(self, lines):
        """Translate .t file `lines` into (salt, script, after, expected).

        salt: unique marker echoed by the script to sync output with input.
        script: list of shell lines implementing the test.
        after: non-command source content, keyed by source line number.
        expected: expected output lines, keyed by source line number.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()

        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        activetrace = []
        session = str(uuid.uuid4())
        if PYTHON3:
            session = session.encode('ascii')
        hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
            'HGCATAPULTSERVERPIPE'
        )

        def toggletrace(cmd=None):
            # Emit catapult END/START markers around each command; no-op
            # when tracing is disabled.
            if not hgcatapult or hgcatapult == os.devnull:
                return

            if activetrace:
                script.append(
                    b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                    % (session, activetrace[0])
                )
            if cmd is None:
                return

            if isinstance(cmd, str):
                quoted = shellquote(cmd.strip())
            else:
                quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
            quoted = quoted.replace(b'\\', b'\\\\')
            script.append(
                b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                % (session, quoted)
            )
            activetrace[0:] = [quoted]

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        if hgcatapult and hgcatapult != os.devnull:
            if PYTHON3:
                hgcatapult = hgcatapult.encode('utf8')
                cataname = self.name.encode('utf8')
            else:
                cataname = self.name

            # Kludge: use a while loop to keep the pipe from getting
            # closed by our echo commands. The still-running file gets
            # reaped at the end of the script, which causes the while
            # loop to exit and closes the pipe. Sigh.
            script.append(
                b'rtendtracing() {\n'
                b'  echo END %(session)s %(name)s >> %(catapult)s\n'
                b'  rm -f "$TESTTMP/.still-running"\n'
                b'}\n'
                b'trap "rtendtracing" 0\n'
                b'touch "$TESTTMP/.still-running"\n'
                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
                b'> %(catapult)s &\n'
                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
                b'echo START %(session)s %(name)s >> %(catapult)s\n'
                % {
                    b'name': cataname,
                    b'session': session,
                    b'catapult': hgcatapult,
                }
            )

        if self._case:
            casestr = b'#'.join(self._case)
            if isinstance(casestr, str):
                quoted = shellquote(casestr)
            else:
                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append(
                        b'  !!! invalid #require\n'
                    )
                if not skipping:
                    haveresult, message = self._hghave(lsplit[1:])
                    if not haveresult:
                        # Requirement missing: replace the whole script with
                        # a skip (exit 80 == SKIPPED_STATUS).
                        script = [b'echo "%s"\nexit 80\n' % message]
                        break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(b'  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(b'  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '):  # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False)  # Make sure we report the exit code.
                    script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '):  # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '):  # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                rawcmd = l[4:]
                cmd = rawcmd.split()
                toggletrace(rawcmd)
                if len(cmd) == 2 and cmd[0] == b'cd':
                    # Abort the script if a cd fails.
                    rawcmd = b'cd %s || exit 1\n' % cmd[1]
                script.append(rawcmd)
            elif l.startswith(b'  > '):  # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '):  # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(b'  !!! missing #endif\n')
        addsalt(n + 1, False)
        # Need to end any current per-command trace
        if activetrace:
            toggletrace()
        return salt, script, after, expected
1852 1852
    def _processoutput(self, exitcode, output, salt, after, expected):
        # Merge the script output back into a unified test.
        warnonly = WARN_UNDEFINED  # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            warnonly = WARN_NO

        pos = -1
        postout = []
        for out_rawline in output:
            out_line, cmd_line = out_rawline, None
            if salt in out_rawline:
                # Salt marker: text before it is command output, text after
                # identifies the source line number and exit status.
                out_line, cmd_line = out_rawline.split(salt, 1)

            pos, postout, warnonly = self._process_out_line(
                out_line, pos, postout, expected, warnonly
            )
            pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)

        # Append any trailing non-command source content.
        if pos in after:
            postout += after.pop(pos)

        if warnonly == WARN_YES:
            exitcode = False  # Set exitcode to warned.

        return exitcode, postout
1878 1878
    def _process_out_line(self, out_line, pos, postout, expected, warnonly):
        """Match one line of actual output against the expected lines.

        Walks the expected lines registered at ``pos``, honoring optional
        (``(?)``) lines, feature-conditional lines and the "retry" result
        from linematch(). The (possibly annotated) line is appended to
        ``postout``. Returns the updated ``(pos, postout, warnonly)``.

        Note: the ``while``/``else`` construct below means the ``else``
        branch runs only when ``out_line`` is empty on entry — i.e. there
        is no actual output left, so remaining expected lines at this
        position are flushed (optional ones kept verbatim).
        """
        while out_line:
            if not out_line.endswith(b'\n'):
                out_line += b' (no-eol)\n'

            # Find the expected output at the current position.
            els = [None]
            if expected.get(pos, None):
                els = expected[pos]

            optional = []
            for i, el in enumerate(els):
                r = False
                if el:
                    r, exact = self.linematch(el, out_line)
                    if isinstance(r, str):
                        # NOTE: under Python 3 globmatch returns b'-glob'
                        # (bytes), so this branch is effectively py2-only.
                        if r == '-glob':
                            out_line = ''.join(el.rsplit(' (glob)', 1))
                            r = ''  # Warn only this line.
                        elif r == "retry":
                            postout.append(b'  ' + el)
                        else:
                            log('\ninfo, unknown linematch result: %r\n' % r)
                            r = False
                    if r:
                        els.pop(i)
                        break
                    if el:
                        if isoptional(el):
                            optional.append(i)
                        else:
                            m = optline.match(el)
                            if m:
                                conditions = [c for c in m.group(2).split(b' ')]

                                if not self._iftest(conditions):
                                    optional.append(i)
                        if exact:
                            # Don't allow line to be matches against a later
                            # line in the output
                            els.pop(i)
                            break

            if r:
                if r == "retry":
                    continue
                # clean up any optional leftovers
                for i in optional:
                    postout.append(b'  ' + els[i])
                for i in reversed(optional):
                    del els[i]
                postout.append(b'  ' + el)
            else:
                if self.NEEDESCAPE(out_line):
                    out_line = TTest._stringescape(
                        b'%s (esc)\n' % out_line.rstrip(b'\n')
                    )
                postout.append(b'  ' + out_line)  # Let diff deal with it.
                if r != '':  # If line failed.
                    warnonly = WARN_NO
                elif warnonly == WARN_UNDEFINED:
                    warnonly = WARN_YES
            break
        else:
            # clean up any optional leftovers
            while expected.get(pos, None):
                el = expected[pos].pop(0)
                if el:
                    if not isoptional(el):
                        m = optline.match(el)
                        if m:
                            conditions = [c for c in m.group(2).split(b' ')]

                            if self._iftest(conditions):
                                # Don't append as optional line
                                continue
                    else:
                        continue
                postout.append(b'  ' + el)
        return pos, postout, warnonly
1959 1959
1960 1960 def _process_cmd_line(self, cmd_line, pos, postout, after):
1961 1961 """process a "command" part of a line from unified test output"""
1962 1962 if cmd_line:
1963 1963 # Add on last return code.
1964 1964 ret = int(cmd_line.split()[1])
1965 1965 if ret != 0:
1966 1966 postout.append(b' [%d]\n' % ret)
1967 1967 if pos in after:
1968 1968 # Merge in non-active test bits.
1969 1969 postout += after.pop(pos)
1970 1970 pos = int(cmd_line.split()[0])
1971 1971 return pos, postout
1972 1972
1973 1973 @staticmethod
1974 1974 def rematch(el, l):
1975 1975 try:
1976 1976 # parse any flags at the beginning of the regex. Only 'i' is
1977 1977 # supported right now, but this should be easy to extend.
1978 1978 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
1979 1979 flags = flags or b''
1980 1980 el = flags + b'(?:' + el + b')'
1981 1981 # use \Z to ensure that the regex matches to the end of the string
1982 1982 if os.name == 'nt':
1983 1983 return re.match(el + br'\r?\n\Z', l)
1984 1984 return re.match(el + br'\n\Z', l)
1985 1985 except re.error:
1986 1986 # el is an invalid regex
1987 1987 return False
1988 1988
1989 1989 @staticmethod
1990 1990 def globmatch(el, l):
1991 1991 # The only supported special characters are * and ? plus / which also
1992 1992 # matches \ on windows. Escaping of these characters is supported.
1993 1993 if el + b'\n' == l:
1994 1994 if os.altsep:
1995 1995 # matching on "/" is not needed for this line
1996 1996 for pat in checkcodeglobpats:
1997 1997 if pat.match(el):
1998 1998 return True
1999 1999 return b'-glob'
2000 2000 return True
2001 2001 el = el.replace(b'$LOCALIP', b'*')
2002 2002 i, n = 0, len(el)
2003 2003 res = b''
2004 2004 while i < n:
2005 2005 c = el[i : i + 1]
2006 2006 i += 1
2007 2007 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2008 2008 res += el[i - 1 : i + 1]
2009 2009 i += 1
2010 2010 elif c == b'*':
2011 2011 res += b'.*'
2012 2012 elif c == b'?':
2013 2013 res += b'.'
2014 2014 elif c == b'/' and os.altsep:
2015 2015 res += b'[/\\\\]'
2016 2016 else:
2017 2017 res += re.escape(c)
2018 2018 return TTest.rematch(res, l)
2019 2019
    def linematch(self, el, l):
        """Match expected line ``el`` against actual output line ``l``.

        Returns a ``(matched, exact)`` pair. ``matched`` is True on a
        match, False on a mismatch, or the string "retry" when an optional
        (``(?)``) or feature-gated line did not apply and the caller may
        try it against a later output line; pattern branches may also
        propagate a truthy non-boolean from rematch()/globmatch().
        ``exact`` is True when the comparison was plain equality (the
        expected line must not be matched against later output), False
        when a (re)/(glob) pattern was used.
        """
        if el == l:  # perfect match (fast)
            return True, True
        retry = False
        if isoptional(el):
            retry = "retry"
            el = el[: -len(MARK_OPTIONAL)] + b"\n"
        else:
            m = optline.match(el)
            if m:
                conditions = [c for c in m.group(2).split(b' ')]

                el = m.group(1) + b"\n"
                if not self._iftest(conditions):
                    # listed feature missing, should not match
                    return "retry", False

        if el.endswith(b" (esc)\n"):
            if PYTHON3:
                el = el[:-7].decode('unicode_escape') + '\n'
                el = el.encode('utf-8')
            else:
                el = el[:-7].decode('string-escape') + '\n'
        if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
            return True, True
        if el.endswith(b" (re)\n"):
            return (TTest.rematch(el[:-6], l) or retry), False
        if el.endswith(b" (glob)\n"):
            # ignore '(glob)' added to l by 'replacements'
            if l.endswith(b" (glob)\n"):
                l = l[:-8] + b"\n"
            return (TTest.globmatch(el[:-8], l) or retry), False
        if os.altsep:
            # Retry the equality check with path separators normalized.
            _l = l.replace(b'\\', b'/')
            if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
                return True, True
        return retry, True
2057 2057
2058 2058 @staticmethod
2059 2059 def parsehghaveoutput(lines):
2060 2060 '''Parse hghave log lines.
2061 2061
2062 2062 Return tuple of lists (missing, failed):
2063 2063 * the missing/unknown features
2064 2064 * the features for which existence check failed'''
2065 2065 missing = []
2066 2066 failed = []
2067 2067 for line in lines:
2068 2068 if line.startswith(TTest.SKIPPED_PREFIX):
2069 2069 line = line.splitlines()[0]
2070 2070 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2071 2071 elif line.startswith(TTest.FAILED_PREFIX):
2072 2072 line = line.splitlines()[0]
2073 2073 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2074 2074
2075 2075 return missing, failed
2076 2076
2077 2077 @staticmethod
2078 2078 def _escapef(m):
2079 2079 return TTest.ESCAPEMAP[m.group(0)]
2080 2080
    @staticmethod
    def _stringescape(s):
        """Escape non-printable bytes in ``s`` using the class escape table."""
        return TTest.ESCAPESUB(TTest._escapef, s)
2084 2084
2085 2085
# Serializes console/stream output across worker threads so progress
# markers and diffs do not interleave (used throughout TestResult and
# TestSuite below).
iolock = threading.RLock()
# NOTE(review): firstlock's users are outside this chunk — presumably it
# guards the first-failure bookkeeping for --first; confirm at call sites.
firstlock = threading.RLock()
# Set once a failure has been reported; addOutputMismatch() uses it to
# suppress stale output from other tests after the first error.
firsterror = False
2089 2089
2090 2090
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""

    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)

        # Parsed command line options for this run.
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples; layout is documented in stopTest().
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            self.color = pygmentspresent and self.stream.isatty()
        elif options.color == 'never':
            self.color = False
        else:  # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """ Can be overridden by custom TestResult
        """

    def onEnd(self):
        """ Can be overridden by custom TestResult
        """

    def addFailure(self, test, reason):
        """Record a failure; stop immediately under --first, else print a marker."""
        self.failures.append((test, reason))

        if self._options.first:
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        """Record a pass; iolock keeps the progress dot from interleaving."""
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record an error (exception), stopping the run under --first."""
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        """Record a skipped test and print the skip marker/reason."""
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        """Record an ignored test (custom result, similar to skip)."""
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # Ignored-by-filter tests still count toward testsRun.
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                v = self._options.view
                subprocess.call(
                    r'"%s" "%s" "%s"'
                    % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
                    shell=True,
                )
            else:
                servefail, lines = getdiff(
                    expected, got, test.refpath, test.errpath
                )
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # Write raw bytes through the underlying buffer.
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport
                    )

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)'
                    )
                else:
                    self.stream.write('Accept this change? [n] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, '%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        """Stamp CPU and wall-clock start times onto the test instance."""
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows. Hence why we fall back to another function
        # for wall time calculations.
        test.started_times = os.times()
        # TODO use a monotonic clock once support for Python 2.7 is dropped.
        test.started_time = time.time()
        if self._firststarttime is None:  # thread racy but irrelevant
            self._firststarttime = test.started_time

    def stopTest(self, test, interrupted=False):
        """Record the test's timing tuple; report if it was interrupted."""
        super(TestResult, self).stopTest(test)

        test.stopped_times = os.times()
        stopped_time = time.time()

        starttime = test.started_times
        endtime = test.stopped_times
        origin = self._firststarttime
        self.times.append(
            (
                test.name,
                endtime[2] - starttime[2],  # user space CPU time
                endtime[3] - starttime[3],  # sys space CPU time
                stopped_time - test.started_time,  # real time
                test.started_time - origin,  # start date in run context
                stopped_time - origin,  # end date in run context
            )
        )

        if interrupted:
            with iolock:
                self.stream.writeln(
                    'INTERRUPTED: %s (after %d seconds)'
                    % (test.name, self.times[-1][3])
                )
2285 2285
2286 2286
def getTestResult():
    """
    Returns the relevant test result
    """
    # A custom result class may be injected via the environment; the named
    # module must expose a ``TestResult`` attribute.
    if "CUSTOM_TEST_RESULT" not in os.environ:
        return TestResult
    testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
    return testresultmodule.TestResult
2296 2296
2297 2297
class TestSuite(unittest.TestSuite):
    """Custom unittest TestSuite that knows how to execute Mercurial tests."""

    def __init__(
        self,
        testdir,
        jobs=1,
        whitelist=None,
        blacklist=None,
        retest=False,
        keywords=None,
        loop=False,
        runs_per_test=1,
        loadtest=None,
        showchannels=False,
        *args,
        **kwargs
    ):
        """Create a new instance that can run tests with a configuration.

        testdir specifies the directory where tests are executed from. This
        is typically the ``tests`` directory from Mercurial's source
        repository.

        jobs specifies the number of jobs to run concurrently. Each test
        executes on its own thread. Tests actually spawn new processes, so
        state mutation should not be an issue.

        If there is only one job, it will use the main thread.

        whitelist and blacklist denote tests that have been whitelisted and
        blacklisted, respectively. These arguments don't belong in TestSuite.
        Instead, whitelist and blacklist should be handled by the thing that
        populates the TestSuite with tests. They are present to preserve
        backwards compatible behavior which reports skipped tests as part
        of the results.

        retest denotes whether to retest failed tests. This arguably belongs
        outside of TestSuite.

        keywords denotes key words that will be used to filter which tests
        to execute. This arguably belongs outside of TestSuite.

        loop denotes whether to loop over tests forever.
        """
        super(TestSuite, self).__init__(*args, **kwargs)

        self._jobs = jobs
        self._whitelist = whitelist
        self._blacklist = blacklist
        self._retest = retest
        self._keywords = keywords
        self._loop = loop
        self._runs_per_test = runs_per_test
        self._loadtest = loadtest
        self._showchannels = showchannels

    def run(self, result):
        """Filter, schedule and execute the suite's tests, reporting to ``result``."""
        # We have a number of filters that need to be applied. We do this
        # here instead of inside Test because it makes the running logic for
        # Test simpler.
        tests = []
        num_tests = [0]
        for test in self._tests:

            # Closure over ``test``: returns the test, reloading it when
            # the test object asks to be re-instantiated per run.
            def get():
                num_tests[0] += 1
                if getattr(test, 'should_reload', False):
                    return self._loadtest(test, num_tests[0])
                return test

            if not os.path.exists(test.path):
                result.addSkip(test, "Doesn't exist")
                continue

            # Whitelisted tests bypass the blacklist/retest/keyword filters.
            if not (self._whitelist and test.bname in self._whitelist):
                if self._blacklist and test.bname in self._blacklist:
                    result.addSkip(test, 'blacklisted')
                    continue

                if self._retest and not os.path.exists(test.errpath):
                    result.addIgnore(test, 'not retesting')
                    continue

                if self._keywords:
                    with open(test.path, 'rb') as f:
                        t = f.read().lower() + test.bname.lower()
                    ignored = False
                    for k in self._keywords.lower().split():
                        if k not in t:
                            result.addIgnore(test, "doesn't match keyword")
                            ignored = True
                            break

                    if ignored:
                        continue
            for _ in xrange(self._runs_per_test):
                tests.append(get())

        runtests = list(tests)
        done = queue.Queue()
        running = 0

        # One single-character "channel" per job for --showchannels output.
        channels = [""] * self._jobs

        def job(test, result):
            # Run one test, claiming a free output channel for its duration
            # and reporting completion (or an internal error) via ``done``.
            for n, v in enumerate(channels):
                if not v:
                    channel = n
                    break
            else:
                raise ValueError('Could not find output channel')
            channels[channel] = "=" + test.name[5:].split(".")[0]
            try:
                test(result)
                done.put(None)
            except KeyboardInterrupt:
                pass
            except:  # re-raises
                done.put(('!', test, 'run-test raised an error, see traceback'))
                raise
            finally:
                try:
                    channels[channel] = ''
                except IndexError:
                    pass

        def stat():
            # Background thread: periodically render per-channel progress
            # until ``channels`` is cleared at the end of the run.
            count = 0
            while channels:
                d = '\n%03s  ' % count
                for n, v in enumerate(channels):
                    if v:
                        d += v[0]
                        channels[n] = v[1:] or '.'
                    else:
                        d += ' '
                    d += ' '
                with iolock:
                    sys.stdout.write(d + '  ')
                    sys.stdout.flush()
                for x in xrange(10):
                    if channels:
                        time.sleep(0.1)
                count += 1

        stoppedearly = False

        if self._showchannels:
            statthread = threading.Thread(target=stat, name="stat")
            statthread.start()

        try:
            while tests or running:
                # Drain a completion slot when saturated or out of new tests.
                if not done.empty() or running == self._jobs or not tests:
                    try:
                        done.get(True, 1)
                        running -= 1
                        if result and result.shouldStop:
                            stoppedearly = True
                            break
                    except queue.Empty:
                        continue
                if tests and not running == self._jobs:
                    test = tests.pop(0)
                    if self._loop:
                        if getattr(test, 'should_reload', False):
                            num_tests[0] += 1
                            tests.append(self._loadtest(test, num_tests[0]))
                        else:
                            tests.append(test)
                    if self._jobs == 1:
                        job(test, result)
                    else:
                        t = threading.Thread(
                            target=job, name=test.name, args=(test, result)
                        )
                        t.start()
                    running += 1

            # If we stop early we still need to wait on started tests to
            # finish. Otherwise, there is a race between the test completing
            # and the test's cleanup code running. This could result in the
            # test reporting incorrect.
            if stoppedearly:
                while running:
                    try:
                        done.get(True, 1)
                        running -= 1
                    except queue.Empty:
                        continue
        except KeyboardInterrupt:
            for test in runtests:
                test.abort()

        # Emptying ``channels`` also terminates the stat() thread.
        channels = []

        return result
2496 2496
2497 2497
2498 2498 # Save the most recent 5 wall-clock runtimes of each test to a
2499 2499 # human-readable text file named .testtimes. Tests are sorted
2500 2500 # alphabetically, while times for each test are listed from oldest to
2501 2501 # newest.
2502 2502
2503 2503
def loadtimes(outputdir):
    """Load recorded test runtimes from ``outputdir``/.testtimes.

    Returns a list of ``(testname, [runtime, ...])`` pairs with runtimes
    ordered oldest to newest (the format written by savetimes()). Returns
    an empty list when no times file exists yet.
    """
    times = []
    try:
        with open(os.path.join(outputdir, b'.testtimes')) as fp:
            for line in fp:
                m = re.match('(.*?) ([0-9. ]+)', line)
                # Skip malformed (e.g. blank or truncated) lines instead of
                # crashing with AttributeError on m.group() below.
                if not m:
                    continue
                times.append(
                    (m.group(1), [float(t) for t in m.group(2).split()])
                )
    except IOError as err:
        # A missing file just means no times were recorded yet.
        if err.errno != errno.ENOENT:
            raise
    return times
2517 2517
2518 2518
def savetimes(outputdir, result):
    """Persist recent wall-clock runtimes to ``outputdir``/.testtimes.

    Keeps at most the five most recent runtimes per (non-skipped) test,
    sorted alphabetically by test name, and replaces the times file
    atomically via a temp file + rename.
    """
    maxruns = 5
    previous = dict(loadtimes(outputdir))
    skipped_names = {str(entry[0]) for entry in result.skipped}
    for tdata in result.times:
        name, real = tdata[0], tdata[3]
        if name in skipped_names:
            continue
        runs = previous.setdefault(name, [])
        runs.append(real)
        # Trim in place to the most recent runs.
        runs[:] = runs[-maxruns:]

    fd, tmpname = tempfile.mkstemp(
        prefix=b'.testtimes', dir=outputdir, text=True
    )
    with os.fdopen(fd, 'w') as fp:
        for name, runs in sorted(previous.items()):
            samples = ' '.join(['%.3f' % (t,) for t in runs])
            fp.write('%s %s\n' % (name, samples))
    timepath = os.path.join(outputdir, b'.testtimes')
    # Best-effort replace; Windows rename fails over an existing file.
    try:
        os.unlink(timepath)
    except OSError:
        pass
    try:
        os.rename(tmpname, timepath)
    except OSError:
        pass
2545 2545
2546 2546
class TextTestRunner(unittest.TextTestRunner):
    """Custom unittest test runner that uses appropriate settings."""

    def __init__(self, runner, *args, **kwargs):
        super(TextTestRunner, self).__init__(*args, **kwargs)

        # The owning TestRunner; supplies options and output directory.
        self._runner = runner

        self._result = getTestResult()(
            self._runner.options, self.stream, self.descriptions, self.verbosity
        )

    def listtests(self, test):
        """Print test names (instead of running) and still emit reports."""
        test = sorted(test, key=lambda t: t.name)

        self._result.onStart(test)

        for t in test:
            print(t.name)
            self._result.addSuccess(t)

        if self._runner.options.xunit:
            with open(self._runner.options.xunit, "wb") as xuf:
                self._writexunit(self._result, xuf)

        if self._runner.options.json:
            jsonpath = os.path.join(self._runner._outputdir, b'report.json')
            with open(jsonpath, 'w') as fp:
                self._writejson(self._result, fp)

        return self._result

    def run(self, test):
        """Run the suite, then print the summary and write all reports."""
        self._result.onStart(test)
        test(self._result)

        failed = len(self._result.failures)
        skipped = len(self._result.skipped)
        ignored = len(self._result.ignored)

        with iolock:
            self.stream.writeln('')

            if not self._runner.options.noskips:
                for test, msg in sorted(
                    self._result.skipped, key=lambda s: s[0].name
                ):
                    formatted = 'Skipped %s: %s\n' % (test.name, msg)
                    msg = highlightmsg(formatted, self._result.color)
                    self.stream.write(msg)
            for test, msg in sorted(
                self._result.failures, key=lambda f: f[0].name
            ):
                formatted = 'Failed %s: %s\n' % (test.name, msg)
                self.stream.write(highlightmsg(formatted, self._result.color))
            for test, msg in sorted(
                self._result.errors, key=lambda e: e[0].name
            ):
                self.stream.writeln('Errored %s: %s' % (test.name, msg))

            if self._runner.options.xunit:
                with open(self._runner.options.xunit, "wb") as xuf:
                    self._writexunit(self._result, xuf)

            if self._runner.options.json:
                jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                with open(jsonpath, 'w') as fp:
                    self._writejson(self._result, fp)

            self._runner._checkhglib('Tested')

            savetimes(self._runner._outputdir, self._result)

            if failed and self._runner.options.known_good_rev:
                self._bisecttests(t for t, m in self._result.failures)
            self.stream.writeln(
                '# Ran %d tests, %d skipped, %d failed.'
                % (self._result.testsRun, skipped + ignored, failed)
            )
            if failed:
                # The hash seed helps reproduce hash-order-dependent failures.
                self.stream.writeln(
                    'python hash seed: %s' % os.environ['PYTHONHASHSEED']
                )
            if self._runner.options.time:
                self.printtimes(self._result.times)

            if self._runner.options.exceptions:
                exceptions = aggregateexceptions(
                    os.path.join(self._runner._outputdir, b'exceptions')
                )

                self.stream.writeln('Exceptions Report:')
                self.stream.writeln(
                    '%d total from %d frames'
                    % (exceptions['total'], len(exceptions['exceptioncounts']))
                )
                combined = exceptions['combined']
                for key in sorted(combined, key=combined.get, reverse=True):
                    frame, line, exc = key
                    totalcount, testcount, leastcount, leasttest = combined[key]

                    self.stream.writeln(
                        '%d (%d tests)\t%s: %s (%s - %d total)'
                        % (
                            totalcount,
                            testcount,
                            frame,
                            exc,
                            leasttest,
                            leastcount,
                        )
                    )

            self.stream.flush()

        return self._result

    def _bisecttests(self, tests):
        """Use ``hg bisect`` to find the revision that broke each failed test."""
        bisectcmd = ['hg', 'bisect']
        bisectrepo = self._runner.options.bisect_repo
        if bisectrepo:
            bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])

        def pread(args):
            # Run a command with HGPLAIN set and return its combined output.
            env = os.environ.copy()
            env['HGPLAIN'] = '1'
            p = subprocess.Popen(
                args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
            )
            data = p.stdout.read()
            p.wait()
            return data

        for test in tests:
            pread(bisectcmd + ['--reset']),
            pread(bisectcmd + ['--bad', '.'])
            pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
            # TODO: we probably need to forward more options
            # that alter hg's behavior inside the tests.
            opts = ''
            withhg = self._runner.options.with_hg
            if withhg:
                opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
            rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
            data = pread(bisectcmd + ['--command', rtc])
            m = re.search(
                (
                    br'\nThe first (?P<goodbad>bad|good) revision '
                    br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
                    br'summary: +(?P<summary>[^\n]+)\n'
                ),
                data,
                (re.MULTILINE | re.DOTALL),
            )
            if m is None:
                self.stream.writeln(
                    'Failed to identify failure point for %s' % test
                )
                continue
            dat = m.groupdict()
            verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
            self.stream.writeln(
                '%s %s by %s (%s)'
                % (
                    test,
                    verb,
                    dat['node'].decode('ascii'),
                    dat['summary'].decode('utf8', 'ignore'),
                )
            )

    def printtimes(self, times):
        """Print a per-test timing table, slowest (by real time) last."""
        # iolock held by run
        self.stream.writeln('# Producing time report')
        times.sort(key=lambda t: (t[3]))
        cols = '%7.3f %7.3f %7.3f %7.3f %7.3f   %s'
        self.stream.writeln(
            '%-7s %-7s %-7s %-7s %-7s   %s'
            % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
        )
        for tdata in times:
            test = tdata[0]
            cuser, csys, real, start, end = tdata[1:6]
            self.stream.writeln(cols % (start, end, cuser, csys, real, test))

    @staticmethod
    def _writexunit(result, outf):
        """Write an xunit-style XML report for ``result`` to ``outf``."""
        # See http://llg.cubic.org/docs/junit/ for a reference.
        timesd = {t[0]: t[3] for t in result.times}
        doc = minidom.Document()
        s = doc.createElement('testsuite')
        s.setAttribute('errors', "0")  # TODO
        s.setAttribute('failures', str(len(result.failures)))
        s.setAttribute('name', 'run-tests')
        s.setAttribute(
            'skipped', str(len(result.skipped) + len(result.ignored))
        )
        s.setAttribute('tests', str(result.testsRun))
        doc.appendChild(s)
        for tc in result.successes:
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            tctime = timesd.get(tc.name)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            s.appendChild(t)
        for tc, err in sorted(result.faildata.items()):
            t = doc.createElement('testcase')
            t.setAttribute('name', tc)
            tctime = timesd.get(tc)
            if tctime is not None:
                t.setAttribute('time', '%.3f' % tctime)
            # createCDATASection expects a unicode or it will
            # convert using default conversion rules, which will
            # fail if string isn't ASCII.
            err = cdatasafe(err).decode('utf-8', 'replace')
            cd = doc.createCDATASection(err)
            # Use 'failure' here instead of 'error' to match errors = 0,
            # failures = len(result.failures) in the testsuite element.
            failelem = doc.createElement('failure')
            failelem.setAttribute('message', 'output changed')
            failelem.setAttribute('type', 'output-mismatch')
            failelem.appendChild(cd)
            t.appendChild(failelem)
            s.appendChild(t)
        for tc, message in result.skipped:
            # According to the schema, 'skipped' has no attributes. So store
            # the skip message as a text node instead.
            t = doc.createElement('testcase')
            t.setAttribute('name', tc.name)
            binmessage = message.encode('utf-8')
            message = cdatasafe(binmessage).decode('utf-8', 'replace')
            cd = doc.createCDATASection(message)
            skipelem = doc.createElement('skipped')
            skipelem.appendChild(cd)
            t.appendChild(skipelem)
            s.appendChild(t)
        outf.write(doc.toprettyxml(indent='  ', encoding='utf-8'))

    @staticmethod
    def _writejson(result, outf):
        """Write a JSON report ("testreport = {...}") for ``result`` to ``outf``."""
        timesd = {}
        for tdata in result.times:
            test = tdata[0]
            timesd[test] = tdata[1:]

        outcome = {}
        groups = [
            ('success', ((tc, None) for tc in result.successes)),
            ('failure', result.failures),
            ('skip', result.skipped),
        ]
        for res, testcases in groups:
            for tc, __ in testcases:
                if tc.name in timesd:
                    diff = result.faildata.get(tc.name, b'')
                    try:
                        diff = diff.decode('unicode_escape')
                    except UnicodeDecodeError as e:
                        diff = '%r decoding diff, sorry' % e
                    tres = {
                        'result': res,
                        'time': ('%0.3f' % timesd[tc.name][2]),
                        'cuser': ('%0.3f' % timesd[tc.name][0]),
                        'csys': ('%0.3f' % timesd[tc.name][1]),
                        'start': ('%0.3f' % timesd[tc.name][3]),
                        'end': ('%0.3f' % timesd[tc.name][4]),
                        'diff': diff,
                    }
                else:
                    # blacklisted test
                    tres = {'result': res}

                outcome[tc.name] = tres
        jsonout = json.dumps(
            outcome, sort_keys=True, indent=4, separators=(',', ': ')
        )
        outf.writelines(("testreport =", jsonout))
2825 2825
2826 2826
def sorttests(testdescs, previoustimes, shuffle=False):
    """Do an in-place sort of tests (or shuffle them when requested).

    With *previoustimes* available, slowest-known tests sort first;
    otherwise file size and slow-keyword heuristics are used.
    """
    if shuffle:
        random.shuffle(testdescs)
        return

    if previoustimes:

        def sortkey(desc):
            path = desc['path']
            # Negate the most recent recorded runtime so slower tests sort
            # first; unknown tests default to a nominal 1 second.
            return -previoustimes[path][-1] if path in previoustimes else -1.0

    else:
        # keywords for slow tests
        slow = {
            b'svn': 10,
            b'cvs': 10,
            b'hghave': 10,
            b'largefiles-update': 10,
            b'run-tests': 10,
            b'corruption': 10,
            b'race': 10,
            b'i18n': 10,
            b'check': 100,
            b'gendoc': 100,
            b'contrib-perf': 200,
            b'merge-combination': 100,
        }
        perf = {}  # memoized sort keys, one stat() per path

        def sortkey(desc):
            # run largest tests first, as they tend to take the longest
            path = desc['path']
            if path in perf:
                return perf[path]
            try:
                val = -os.stat(path).st_size
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                perf[path] = -1e9  # file does not exist, tell early
                return -1e9
            for kw, mul in slow.items():
                if kw in path:
                    val *= mul
            if path.endswith(b'.py'):
                val /= 10.0
            perf[path] = val / 1000.0
            return perf[path]

    testdescs.sort(key=sortkey)
2884 2884
2885 2885
class TestRunner(object):
    """Holds context for executing tests.

    Tests rely on a lot of state. This object holds it for them.
    """

    # Programs required to run tests.
    REQUIREDTOOLS = [
        b'diff',
        b'grep',
        b'unzip',
        b'gunzip',
        b'bunzip2',
        b'sed',
    ]

    # Maps file extensions to test class.
    TESTTYPES = [
        (b'.py', PythonTest),
        (b'.t', TTest),
    ]

    def __init__(self):
        self.options = None  # parsed command line options (set in run())
        self._hgroot = None  # hg source root (set by _installhg())
        self._testdir = None  # directory containing the tests (bytes)
        self._outputdir = None  # where .err files and reports are written
        self._hgtmp = None  # HGTMP scratch directory for this run
        self._installdir = None  # temp install prefix (None with --with-hg)
        self._bindir = None  # directory holding the hg executable
        # Fixed: this was previously assigned to self._tmpbinddir (note the
        # doubled 'd'), a typo — every other site reads/writes
        # self._tmpbindir, so the attribute was never pre-initialized.
        self._tmpbindir = None
        self._pythondir = None  # directory the tests import mercurial from
        self._coveragefile = None
        self._createdfiles = []  # files removed again in _cleanup()
        self._hgcommand = None  # basename of the hg command under test
        self._hgpath = None  # cached result of _gethgpath()
        self._portoffset = 0  # next port offset handed out by _getport()
        self._ports = {}  # test index -> assigned start port
2924 2924
    def run(self, args, parser=None):
        """Run the test suite.

        Parses *args* (plus any ``--test-list`` files) into options,
        discovers the tests, and returns the exit status of the run.
        """
        # Run under a fixed umask and restore it afterwards — presumably so
        # file modes created by tests are deterministic; confirm if changing.
        oldmask = os.umask(0o22)
        try:
            parser = parser or getparser()
            options = parseargs(args, parser)
            tests = [_sys2bytes(a) for a in options.tests]
            if options.test_list is not None:
                # Each listed file contributes one test name per line.
                for listfile in options.test_list:
                    with open(listfile, 'rb') as f:
                        tests.extend(t for t in f.read().splitlines() if t)
            self.options = options

            self._checktools()
            testdescs = self.findtests(tests)
            if options.profile_runner:
                import statprof

                statprof.start()
            result = self._run(testdescs)
            if options.profile_runner:
                statprof.stop()
                statprof.display()
            return result

        finally:
            os.umask(oldmask)
2952 2952
    def _run(self, testdescs):
        """Prepare the test environment, then dispatch to _runtests().

        Sets up TESTDIR/HGTMP/PATH/PYTHONPATH and related environment
        variables, locates or schedules installation of the hg under test,
        and returns the exit status (0 on success).
        """
        testdir = getcwdb()
        self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
        # assume all tests in same folder for now
        if testdescs:
            pathname = os.path.dirname(testdescs[0]['path'])
            if pathname:
                testdir = os.path.join(testdir, pathname)
                self._testdir = osenvironb[b'TESTDIR'] = testdir
        if self.options.outputdir:
            self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
        else:
            self._outputdir = getcwdb()
            if testdescs and pathname:
                self._outputdir = os.path.join(self._outputdir, pathname)
        previoustimes = {}
        if self.options.order_by_runtime:
            previoustimes = dict(loadtimes(self._outputdir))
        sorttests(testdescs, previoustimes, shuffle=self.options.random)

        if 'PYTHONHASHSEED' not in os.environ:
            # use a random python hash seed all the time
            # we do the randomness ourself to know what seed is used
            os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))

        # Pick (or create) the HGTMP scratch directory.
        if self.options.tmpdir:
            self.options.keep_tmpdir = True
            tmpdir = _sys2bytes(self.options.tmpdir)
            if os.path.exists(tmpdir):
                # Meaning of tmpdir has changed since 1.3: we used to create
                # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
                # tmpdir already exists.
                print("error: temp dir %r already exists" % tmpdir)
                return 1

            os.makedirs(tmpdir)
        else:
            d = None
            if os.name == 'nt':
                # without this, we get the default temp dir location, but
                # in all lowercase, which causes troubles with paths (issue3490)
                d = osenvironb.get(b'TMP', None)
            tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)

        self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)

        # Resolve which hg to test: an existing binary (--with-hg) or a
        # temporary install performed later by _installhg().
        if self.options.with_hg:
            self._installdir = None
            whg = self.options.with_hg
            self._bindir = os.path.dirname(os.path.realpath(whg))
            assert isinstance(self._bindir, bytes)
            self._hgcommand = os.path.basename(whg)
            self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
            os.makedirs(self._tmpbindir)

            normbin = os.path.normpath(os.path.abspath(whg))
            normbin = normbin.replace(_sys2bytes(os.sep), b'/')

            # Other Python scripts in the test harness need to
            # `import mercurial`. If `hg` is a Python script, we assume
            # the Mercurial modules are relative to its path and tell the tests
            # to load Python modules from its directory.
            with open(whg, 'rb') as fh:
                initial = fh.read(1024)

            if re.match(b'#!.*python', initial):
                self._pythondir = self._bindir
            # If it looks like our in-repo Rust binary, use the source root.
            # This is a bit hacky. But rhg is still not supported outside the
            # source directory. So until it is, do the simple thing.
            elif re.search(b'/rust/target/[^/]+/hg', normbin):
                self._pythondir = os.path.dirname(self._testdir)
            # Fall back to the legacy behavior.
            else:
                self._pythondir = self._bindir

        else:
            self._installdir = os.path.join(self._hgtmp, b"install")
            self._bindir = os.path.join(self._installdir, b"bin")
            self._hgcommand = b'hg'
            self._tmpbindir = self._bindir
            self._pythondir = os.path.join(self._installdir, b"lib", b"python")

        # Force the use of hg.exe instead of relying on MSYS to recognize hg is
        # a python script and feed it to python.exe. Legacy stdio is force
        # enabled by hg.exe, and this is a more realistic way to launch hg
        # anyway.
        if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
            self._hgcommand += b'.exe'

        # set CHGHG, then replace "hg" command by "chg"
        chgbindir = self._bindir
        if self.options.chg or self.options.with_chg:
            osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
        else:
            osenvironb.pop(b'CHGHG', None)  # drop flag for hghave
        if self.options.chg:
            self._hgcommand = b'chg'
        elif self.options.with_chg:
            chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
            self._hgcommand = os.path.basename(self.options.with_chg)

        osenvironb[b"BINDIR"] = self._bindir
        osenvironb[b"PYTHON"] = PYTHON

        # Build the PATH the tests will see; order matters — later inserts
        # end up earlier on PATH and therefore take precedence.
        fileb = _sys2bytes(__file__)
        runtestdir = os.path.abspath(os.path.dirname(fileb))
        osenvironb[b'RUNTESTDIR'] = runtestdir
        if PYTHON3:
            sepb = _sys2bytes(os.pathsep)
        else:
            sepb = os.pathsep
        path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
        if os.path.islink(__file__):
            # test helper will likely be at the end of the symlink
            realfile = os.path.realpath(fileb)
            realdir = os.path.abspath(os.path.dirname(realfile))
            path.insert(2, realdir)
        if chgbindir != self._bindir:
            path.insert(1, chgbindir)
        if self._testdir != runtestdir:
            path = [self._testdir] + path
        if self._tmpbindir != self._bindir:
            path = [self._tmpbindir] + path
        osenvironb[b"PATH"] = sepb.join(path)

        # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
        # can run .../tests/run-tests.py test-foo where test-foo
        # adds an extension to HGRC. Also include run-test.py directory to
        # import modules like heredoctest.
        pypath = [self._pythondir, self._testdir, runtestdir]
        # We have to augment PYTHONPATH, rather than simply replacing
        # it, in case external libraries are only available via current
        # PYTHONPATH. (In particular, the Subversion bindings on OS X
        # are in /opt/subversion.)
        oldpypath = osenvironb.get(IMPL_PATH)
        if oldpypath:
            pypath.append(oldpypath)
        osenvironb[IMPL_PATH] = sepb.join(pypath)

        if self.options.pure:
            os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
            os.environ["HGMODULEPOLICY"] = "py"

        if self.options.allow_slow_tests:
            os.environ["HGTEST_SLOW"] = "slow"
        elif 'HGTEST_SLOW' in os.environ:
            del os.environ['HGTEST_SLOW']

        self._coveragefile = os.path.join(self._testdir, b'.coverage')

        if self.options.exceptions:
            exceptionsdir = os.path.join(self._outputdir, b'exceptions')
            try:
                os.makedirs(exceptionsdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Remove all existing exception reports.
            for f in os.listdir(exceptionsdir):
                os.unlink(os.path.join(exceptionsdir, f))

            osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
            logexceptions = os.path.join(self._testdir, b'logexceptions.py')
            self.options.extra_config_opt.append(
                'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
            )

        vlog("# Using TESTDIR", _bytes2sys(self._testdir))
        vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
        vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
        vlog("# Using PATH", os.environ["PATH"])
        vlog(
            "# Using", _bytes2sys(IMPL_PATH), _bytes2sys(osenvironb[IMPL_PATH]),
        )
        vlog("# Writing to directory", _bytes2sys(self._outputdir))

        try:
            return self._runtests(testdescs) or 0
        finally:
            time.sleep(0.1)
            self._cleanup()
3136 3136
3137 3137 def findtests(self, args):
3138 3138 """Finds possible test files from arguments.
3139 3139
3140 3140 If you wish to inject custom tests into the test harness, this would
3141 3141 be a good function to monkeypatch or override in a derived class.
3142 3142 """
3143 3143 if not args:
3144 3144 if self.options.changed:
3145 3145 proc = Popen4(
3146 3146 b'hg st --rev "%s" -man0 .'
3147 3147 % _sys2bytes(self.options.changed),
3148 3148 None,
3149 3149 0,
3150 3150 )
3151 3151 stdout, stderr = proc.communicate()
3152 3152 args = stdout.strip(b'\0').split(b'\0')
3153 3153 else:
3154 3154 args = os.listdir(b'.')
3155 3155
3156 3156 expanded_args = []
3157 3157 for arg in args:
3158 3158 if os.path.isdir(arg):
3159 3159 if not arg.endswith(b'/'):
3160 3160 arg += b'/'
3161 3161 expanded_args.extend([arg + a for a in os.listdir(arg)])
3162 3162 else:
3163 3163 expanded_args.append(arg)
3164 3164 args = expanded_args
3165 3165
3166 3166 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3167 3167 tests = []
3168 3168 for t in args:
3169 3169 case = []
3170 3170
3171 3171 if not (
3172 3172 os.path.basename(t).startswith(b'test-')
3173 3173 and (t.endswith(b'.py') or t.endswith(b'.t'))
3174 3174 ):
3175 3175
3176 3176 m = testcasepattern.match(os.path.basename(t))
3177 3177 if m is not None:
3178 3178 t_basename, casestr = m.groups()
3179 3179 t = os.path.join(os.path.dirname(t), t_basename)
3180 3180 if casestr:
3181 3181 case = casestr.split(b'#')
3182 3182 else:
3183 3183 continue
3184 3184
3185 3185 if t.endswith(b'.t'):
3186 3186 # .t file may contain multiple test cases
3187 3187 casedimensions = parsettestcases(t)
3188 3188 if casedimensions:
3189 3189 cases = []
3190 3190
3191 3191 def addcases(case, casedimensions):
3192 3192 if not casedimensions:
3193 3193 cases.append(case)
3194 3194 else:
3195 3195 for c in casedimensions[0]:
3196 3196 addcases(case + [c], casedimensions[1:])
3197 3197
3198 3198 addcases([], casedimensions)
3199 3199 if case and case in cases:
3200 3200 cases = [case]
3201 3201 elif case:
3202 3202 # Ignore invalid cases
3203 3203 cases = []
3204 3204 else:
3205 3205 pass
3206 3206 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3207 3207 else:
3208 3208 tests.append({'path': t})
3209 3209 else:
3210 3210 tests.append({'path': t})
3211 3211 return tests
3212 3212
    def _runtests(self, testdescs):
        """Build Test objects from *testdescs* and execute them.

        Installs hg/chg first when needed. Returns 1 on failure or
        interrupt, None otherwise.
        """
        def _reloadtest(test, i):
            # convert a test back to its description dict
            desc = {'path': test.path}
            case = getattr(test, '_case', [])
            if case:
                desc['case'] = case
            return self._gettest(desc, i)

        try:
            if self.options.restart:
                # --restart: skip tests with no leftover .err file (they
                # already passed); fall back to the full list if none failed.
                orig = list(testdescs)
                while testdescs:
                    desc = testdescs[0]
                    # desc['path'] is a relative path
                    if 'case' in desc:
                        casestr = b'#'.join(desc['case'])
                        errpath = b'%s#%s.err' % (desc['path'], casestr)
                    else:
                        errpath = b'%s.err' % desc['path']
                    errpath = os.path.join(self._outputdir, errpath)
                    if os.path.exists(errpath):
                        break
                    testdescs.pop(0)
                if not testdescs:
                    print("running all tests")
                    testdescs = orig

            tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
            num_tests = len(tests) * self.options.runs_per_test

            jobs = min(num_tests, self.options.jobs)

            failed = False
            kws = self.options.keywords
            if kws is not None and PYTHON3:
                kws = kws.encode('utf-8')

            suite = TestSuite(
                self._testdir,
                jobs=jobs,
                whitelist=self.options.whitelisted,
                blacklist=self.options.blacklist,
                retest=self.options.retest,
                keywords=kws,
                loop=self.options.loop,
                runs_per_test=self.options.runs_per_test,
                showchannels=self.options.showchannels,
                tests=tests,
                loadtest=_reloadtest,
            )
            verbosity = 1
            if self.options.list_tests:
                verbosity = 0
            elif self.options.verbose:
                verbosity = 2
            runner = TextTestRunner(self, verbosity=verbosity)

            if self.options.list_tests:
                result = runner.listtests(suite)
            else:
                if self._installdir:
                    self._installhg()
                    self._checkhglib("Testing")
                else:
                    self._usecorrectpython()
                if self.options.chg:
                    assert self._installdir
                    self._installchg()

                log(
                    'running %d tests using %d parallel processes'
                    % (num_tests, jobs)
                )

                result = runner.run(suite)

                if result.failures or result.errors:
                    failed = True

                result.onEnd()

                if self.options.anycoverage:
                    self._outputcoverage()
        except KeyboardInterrupt:
            failed = True
            print("\ninterrupted!")

        if failed:
            return 1
3303 3303
3304 3304 def _getport(self, count):
3305 3305 port = self._ports.get(count) # do we have a cached entry?
3306 3306 if port is None:
3307 3307 portneeded = 3
3308 3308 # above 100 tries we just give up and let test reports failure
3309 3309 for tries in xrange(100):
3310 3310 allfree = True
3311 3311 port = self.options.port + self._portoffset
3312 3312 for idx in xrange(portneeded):
3313 3313 if not checkportisavailable(port + idx):
3314 3314 allfree = False
3315 3315 break
3316 3316 self._portoffset += portneeded
3317 3317 if allfree:
3318 3318 break
3319 3319 self._ports[count] = port
3320 3320 return port
3321 3321
    def _gettest(self, testdesc, count):
        """Obtain a Test by looking at its filename.

        Returns a Test instance. The Test may not be runnable if it doesn't
        map to a known type.
        """
        path = testdesc['path']
        lctest = path.lower()
        testcls = Test

        # Pick the Test subclass from the file extension (.py / .t).
        for ext, cls in self.TESTTYPES:
            if lctest.endswith(ext):
                testcls = cls
                break

        refpath = os.path.join(getcwdb(), path)
        # Each test gets its own scratch directory under HGTMP.
        tmpdir = os.path.join(self._hgtmp, b'child%d' % count)

        # extra keyword parameters. 'case' is used by .t tests
        kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}

        t = testcls(
            refpath,
            self._outputdir,
            tmpdir,
            keeptmpdir=self.options.keep_tmpdir,
            debug=self.options.debug,
            first=self.options.first,
            timeout=self.options.timeout,
            startport=self._getport(count),
            extraconfigopts=self.options.extra_config_opt,
            shell=self.options.shell,
            hgcommand=self._hgcommand,
            usechg=bool(self.options.with_chg or self.options.chg),
            useipv6=useipv6,
            **kwds
        )
        t.should_reload = True
        return t
3361 3361
    def _cleanup(self):
        """Clean up state from this test invocation."""
        # --keep-tmpdir leaves everything behind for post-mortem inspection.
        if self.options.keep_tmpdir:
            return

        vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
        shutil.rmtree(self._hgtmp, True)
        for f in self._createdfiles:
            try:
                os.remove(f)
            except OSError:
                # best effort: the file may already be gone
                pass
3374 3374
    def _usecorrectpython(self):
        """Configure the environment to use the appropriate Python in tests.

        Either symlinks a ``python``/``python.exe`` pointing at the current
        interpreter into the temporary bin directory, or (on Windows /
        without symlink support) reorders PATH so the interpreter's own
        directory comes first.
        """
        # Tests must use the same interpreter as us or bad things will happen.
        pyexename = sys.platform == 'win32' and b'python.exe' or b'python'

        # os.symlink() is a thing with py3 on Windows, but it requires
        # Administrator rights.
        if getattr(os, 'symlink', None) and os.name != 'nt':
            vlog(
                "# Making python executable in test path a symlink to '%s'"
                % sysexecutable
            )
            mypython = os.path.join(self._tmpbindir, pyexename)
            try:
                # Reuse an existing, still-correct symlink.
                if os.readlink(mypython) == sysexecutable:
                    return
                os.unlink(mypython)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if self._findprogram(pyexename) != sysexecutable:
                try:
                    os.symlink(sysexecutable, mypython)
                    self._createdfiles.append(mypython)
                except OSError as err:
                    # child processes may race, which is harmless
                    if err.errno != errno.EEXIST:
                        raise
        else:
            exedir, exename = os.path.split(sysexecutable)
            vlog(
                "# Modifying search path to find %s as %s in '%s'"
                % (exename, pyexename, exedir)
            )
            path = os.environ['PATH'].split(os.pathsep)
            while exedir in path:
                path.remove(exedir)
            os.environ['PATH'] = os.pathsep.join([exedir] + path)
            if not self._findprogram(pyexename):
                print("WARNING: Cannot find %s in search path" % pyexename)
3415 3415
    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        Runs ``setup.py`` from the source root into self._installdir, then
        fixes up the python shim and (optionally) the coverage hooks.
        Aborts the whole run (sys.exit(1)) if installation fails.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        if self.options.pure:
            pure = b"--pure"
        else:
            pure = b""

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sysexecutable
        if PYTHON3:
            compiler = _sys2bytes(compiler)
            script = _sys2bytes(script)
            exe = _sys2bytes(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if os.name == 'nt':
            # The --home="" trick works only on OS where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (
            b'"%(exe)s" setup.py %(pure)s clean --all'
            b' build %(compiler)s --build-base="%(base)s"'
            b' install --force --prefix="%(prefix)s"'
            b' --install-lib="%(libdir)s"'
            b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
            % {
                b'exe': exe,
                b'pure': pure,
                b'compiler': compiler,
                b'base': os.path.join(self._hgtmp, b"build"),
                b'prefix': self._installdir,
                b'libdir': self._pythondir,
                b'bindir': self._bindir,
                b'nohome': nohome,
                b'logfile': installerrs,
            }
        )

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd.decode("utf-8"))
        if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            # Installation failed: dump the installer log, then abort.
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        self._usecorrectpython()

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while run-tests.py
            # installation layout put it in bin/ directly. Fix it
            with open(hgbat, 'rb') as f:
                data = f.read()
            if br'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(
                    br'"%~dp0..\python" "%~dp0hg" %*',
                    b'"%~dp0python" "%~dp0hg" %*',
                )
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            # Arrange for every spawned python to record coverage data.
            custom = os.path.join(
                osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
            )
            target = os.path.join(self._pythondir, b'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, b'.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            osenvironb[b'COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, b'..', b'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            osenvironb[b'COVERAGE_DIR'] = covdir
3532 3532
    def _checkhglib(self, verb):
        """Ensure that the 'mercurial' package imported by python is
        the one we expect it to be. If not, print a warning to stderr.

        *verb* describes the current action (e.g. "Testing") and only
        appears in the warning message.
        """
        if (self._bindir == self._pythondir) and (
            self._bindir != self._tmpbindir
        ):
            # The pythondir has been inferred from --with-hg flag.
            # We cannot expect anything sensible here.
            return
        expecthg = os.path.join(self._pythondir, b'mercurial')
        actualhg = self._gethgpath()
        if os.path.abspath(actualhg) != os.path.abspath(expecthg):
            sys.stderr.write(
                'warning: %s with unexpected mercurial lib: %s\n'
                ' (expected %s)\n' % (verb, actualhg, expecthg)
            )
3549 3549
    def _gethgpath(self):
        """Return the path to the mercurial package that is actually found by
        the current Python interpreter."""
        # Cached after the first (subprocess-based) lookup.
        if self._hgpath is not None:
            return self._hgpath

        # Ask the test python itself where it imports mercurial from.
        cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
        cmd = cmd % PYTHON
        if PYTHON3:
            cmd = _bytes2sys(cmd)

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        out, err = p.communicate()

        self._hgpath = out.strip()

        return self._hgpath
3567 3567
    def _installchg(self):
        """Install chg into the test environment.

        Builds contrib/chg with make into the temporary install prefix.
        Aborts the whole run (sys.exit(1)) if the build fails.
        """
        vlog('# Performing temporary installation of CHG')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
            b'make': b'make',  # TODO: switch by option or environment?
            b'prefix': self._installdir,
        }
        cwd = os.path.join(self._hgroot, b'contrib', b'chg')
        vlog("# Running", cmd)
        proc = subprocess.Popen(
            cmd,
            shell=True,
            cwd=cwd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _err = proc.communicate()
        if proc.returncode != 0:
            # Surface the captured build output before aborting.
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)
3594 3594
    def _outputcoverage(self):
        """Produce code coverage output.

        Combines the per-process coverage data and emits a console report,
        plus HTML / annotated-source reports when requested via options.
        """
        import coverage

        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]

        cov.combine()

        # Exclude the installed scripts and the tests themselves.
        omit = [
            _bytes2sys(os.path.join(x, b'*'))
            for x in [self._bindir, self._testdir]
        ]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)
3627 3627
3628 3628 def _findprogram(self, program):
3629 3629 """Search PATH for a executable program"""
3630 3630 dpb = _sys2bytes(os.defpath)
3631 3631 sepb = _sys2bytes(os.pathsep)
3632 3632 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3633 3633 name = os.path.join(p, program)
3634 3634 if os.name == 'nt' or os.access(name, os.X_OK):
3635 3635 return name
3636 3636 return None
3637 3637
    def _checktools(self):
        """Ensure tools required to run tests are present."""
        for p in self.REQUIREDTOOLS:
            # Windows executables carry an .exe suffix.
            if os.name == 'nt' and not p.endswith(b'.exe'):
                p += b'.exe'
            found = self._findprogram(p)
            p = p.decode("utf-8")
            if found:
                vlog("# Found prerequisite", p, "at", _bytes2sys(found))
            else:
                # Only a warning: individual tests report their own failures.
                print("WARNING: Did not find prerequisite tool: %s " % p)
3649 3649
3650 3650
def aggregateexceptions(path):
    """Summarize the exception report files found in directory *path*.

    Each report file holds five NUL-separated fields (exception, main
    frame, hg frame, hg line, test name); malformed files are ignored.
    Returns a dict with per-failure counts, per-test/per-failure mappings,
    the least-failing test per failure, and a combined sort-friendly view.
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')
        # Skip files that don't have exactly the expected five fields.
        if len(fields) != 5:
            continue

        exc, mainframe, hgframe, hgline, testname = (
            field.decode('utf-8') for field in fields
        )

        key = (hgframe, hgline, exc)
        exceptioncounts[key] += 1
        testsbyfailure[key].add(testname)
        failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure. Iterating the
    # sorted test names makes the winner deterministic on ties.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        best = min(sorted(tests), key=lambda t: len(failuresbytest[t]))
        leastfailing[key] = (len(failuresbytest[best]), best)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {
        key: (
            exceptioncounts[key],
            len(testsbyfailure[key]),
            leastfailing[key][0],
            leastfailing[key][1],
        )
        for key in exceptioncounts
    }

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
3705 3705
3706 3706
if __name__ == '__main__':
    runner = TestRunner()

    try:
        import msvcrt

        # On Windows, put the std streams into binary mode so output is
        # byte-exact (no newline translation).
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        # msvcrt only exists on Windows; default modes are fine elsewhere.
        pass

    sys.exit(runner.run(sys.argv[1:]))
@@ -1,394 +1,402 b''
1 1 #!/usr/bin/env python
2 2 from __future__ import absolute_import, print_function
3 3
4 4 import hashlib
5 5 import os
6 6 import random
7 7 import shutil
8 8 import stat
9 9 import struct
10 10 import sys
11 11 import tempfile
12 12 import time
13 13 import unittest
14 14
15 15 import silenttestrunner
16 16
17 17 # Load the local remotefilelog, not the system one
18 18 sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
19 19 from mercurial.node import nullid
20 from mercurial import policy
21
# Skip the whole test (exit code 80 = "skipped") when the pure-Python
# modules are not available under the active module policy; the second
# tuple element of the _packageprefs entry flags pure availability.
_pureavailable = policy._packageprefs.get(policy.policy, (False, False))[1]
if not _pureavailable and __name__ == '__main__':
    msg = "skipped: pure module not available with module policy:"
    print(msg, policy.policy, file=sys.stderr)
    sys.exit(80)
27
20 28 from mercurial import (
21 29 pycompat,
22 30 ui as uimod,
23 31 )
24 32 from hgext.remotefilelog import (
25 33 basepack,
26 34 constants,
27 35 datapack,
28 36 )
29 37
30 38
class datapacktestsbase(object):
    """Shared test logic for datapack readers.

    Subclasses mix this into unittest.TestCase and supply the concrete
    reader class via ``datapackreader``; ``paramsavailable`` says whether
    the reader exposes pack parameters (fanout prefix) for assertion.
    """

    def __init__(self, datapackreader, paramsavailable):
        self.datapackreader = datapackreader
        self.paramsavailable = paramsavailable

    def setUp(self):
        # Directories created via makeTempDir(), removed in tearDown().
        self.tempdirs = []

    def tearDown(self):
        for d in self.tempdirs:
            shutil.rmtree(d)

    def makeTempDir(self):
        """Create a temp directory (as bytes) and register it for cleanup."""
        tempdir = pycompat.bytestr(tempfile.mkdtemp())
        self.tempdirs.append(tempdir)
        return tempdir

    def getHash(self, content):
        """Return the raw 20-byte SHA-1 digest of ``content``."""
        return hashlib.sha1(content).digest()

    def getFakeHash(self):
        """Return 20 random bytes standing in for a node id."""
        return b''.join(
            pycompat.bytechr(random.randint(0, 255)) for _ in range(20)
        )

    def createPack(self, revisions=None, packdir=None):
        """Write ``revisions`` into a fresh pack and return a reader for it.

        Each revision is ``(filename, node, deltabase, content[, meta])``;
        the fifth element, when present, is a metadata dict.
        """
        if revisions is None:
            revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]

        if packdir is None:
            packdir = self.makeTempDir()

        packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)

        for args in revisions:
            filename, node, base, content = args[0:4]
            # meta is optional
            meta = None
            if len(args) > 4:
                meta = args[4]
            packer.add(filename, node, base, content, metadata=meta)

        path = packer.close()
        return self.datapackreader(path)

    def _testAddSingle(self, content):
        """Test putting a simple blob into a pack and reading it out.
        """
        filename = b"foo"
        node = self.getHash(content)

        revisions = [(filename, node, nullid, content)]
        pack = self.createPack(revisions)
        if self.paramsavailable:
            self.assertEqual(
                pack.params.fanoutprefix, basepack.SMALLFANOUTPREFIX
            )

        chain = pack.getdeltachain(filename, node)
        self.assertEqual(content, chain[0][4])

    def testAddSingle(self):
        # Fixed: the payloads of this test and testAddSingleEmpty were
        # swapped — the "empty" test used b'abcdef' and vice versa.
        self._testAddSingle(b'abcdef')

    def testAddSingleEmpty(self):
        self._testAddSingle(b'')

    def testAddMultiple(self):
        """Test putting multiple unrelated blobs into a pack and reading them
        out.
        """
        revisions = []
        for i in range(10):
            filename = b"foo%d" % i
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, self.getFakeHash(), content))

        pack = self.createPack(revisions)

        for filename, node, base, content in revisions:
            entry = pack.getdelta(filename, node)
            self.assertEqual((content, filename, base, {}), entry)

            chain = pack.getdeltachain(filename, node)
            self.assertEqual(content, chain[0][4])

    def testAddDeltas(self):
        """Test putting multiple delta blobs into a pack and read the chain.
        """
        revisions = []
        filename = b"foo"
        lastnode = nullid
        for i in range(10):
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, lastnode, content))
            lastnode = node

        pack = self.createPack(revisions)

        entry = pack.getdelta(filename, revisions[0][1])
        realvalue = (revisions[0][3], filename, revisions[0][2], {})
        self.assertEqual(entry, realvalue)

        # Test that the chain for the final entry has all the others
        chain = pack.getdeltachain(filename, node)
        for i in range(10):
            content = b"abcdef%d" % i
            self.assertEqual(content, chain[-i - 1][4])

    def testPackMany(self):
        """Pack many related and unrelated objects.
        """
        # Build a random pack file
        revisions = []
        blobs = {}
        random.seed(0)
        for i in range(100):
            filename = b"filename-%d" % i
            filerevs = []
            for j in range(random.randint(1, 100)):
                content = b"content-%d" % j
                node = self.getHash(content)
                lastnode = nullid
                if len(filerevs) > 0:
                    lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
                filerevs.append(node)
                blobs[(filename, node, lastnode)] = content
                revisions.append((filename, node, lastnode, content))

        pack = self.createPack(revisions)

        # Verify the pack contents
        for (filename, node, lastnode), content in sorted(blobs.items()):
            chain = pack.getdeltachain(filename, node)
            for entry in chain:
                expectedcontent = blobs[(entry[0], entry[1], entry[3])]
                self.assertEqual(entry[4], expectedcontent)

    def testPackMetadata(self):
        """Round-trip revision metadata (flags, sizes, arbitrary keys)."""
        revisions = []
        for i in range(100):
            filename = b'%d.txt' % i
            content = b'put-something-here \n' * i
            node = self.getHash(content)
            meta = {
                constants.METAKEYFLAG: i ** 4,
                constants.METAKEYSIZE: len(content),
                b'Z': b'random_string',
                b'_': b'\0' * i,
            }
            revisions.append((filename, node, nullid, content, meta))
        pack = self.createPack(revisions)
        for name, node, x, content, origmeta in revisions:
            parsedmeta = pack.getmeta(name, node)
            # flag == 0 should be optimized out
            if origmeta[constants.METAKEYFLAG] == 0:
                del origmeta[constants.METAKEYFLAG]
            self.assertEqual(parsedmeta, origmeta)

    def testGetMissing(self):
        """Test the getmissing() api.
        """
        revisions = []
        filename = b"foo"
        lastnode = nullid
        for i in range(10):
            content = b"abcdef%d" % i
            node = self.getHash(content)
            revisions.append((filename, node, lastnode, content))
            lastnode = node

        pack = self.createPack(revisions)

        missing = pack.getmissing([(b"foo", revisions[0][1])])
        self.assertFalse(missing)

        missing = pack.getmissing(
            [(b"foo", revisions[0][1]), (b"foo", revisions[1][1])]
        )
        self.assertFalse(missing)

        fakenode = self.getFakeHash()
        missing = pack.getmissing(
            [(b"foo", revisions[0][1]), (b"foo", fakenode)]
        )
        self.assertEqual(missing, [(b"foo", fakenode)])

    def testAddThrows(self):
        # A closed (read-only) pack must refuse additions; note the call
        # deliberately omits the delta base argument as well.
        pack = self.createPack()

        try:
            pack.add(b'filename', nullid, b'contents')
            self.assertTrue(False, "datapack.add should throw")
        except RuntimeError:
            pass

    def testBadVersionThrows(self):
        pack = self.createPack()
        path = pack.path + b'.datapack'
        with open(path, 'rb') as f:
            raw = f.read()
        # Corrupt the one-byte version header to an unsupported value.
        raw = struct.pack('!B', 255) + raw[1:]
        os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
        with open(path, 'wb+') as f:
            f.write(raw)

        try:
            self.datapackreader(pack.path)
            self.assertTrue(False, "bad version number should have thrown")
        except RuntimeError:
            pass

    def testMissingDeltabase(self):
        # A revision whose delta base is absent still yields a 1-entry chain.
        fakenode = self.getFakeHash()
        revisions = [(b"filename", fakenode, self.getFakeHash(), b"content")]
        pack = self.createPack(revisions)
        chain = pack.getdeltachain(b"filename", fakenode)
        self.assertEqual(len(chain), 1)

    def testLargePack(self):
        """Test creating and reading from a large pack with over X entries.
        This causes it to use a 2^16 fanout table instead."""
        revisions = []
        blobs = {}
        total = basepack.SMALLFANOUTCUTOFF + 1
        for i in pycompat.xrange(total):
            filename = b"filename-%d" % i
            content = filename
            node = self.getHash(content)
            blobs[(filename, node)] = content
            revisions.append((filename, node, nullid, content))

        pack = self.createPack(revisions)
        if self.paramsavailable:
            self.assertEqual(
                pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX
            )

        for (filename, node), content in blobs.items():
            actualcontent = pack.getdeltachain(filename, node)[0][4]
            self.assertEqual(actualcontent, content)

    def testPacksCache(self):
        """Test that we remember the most recent packs while fetching the delta
        chain."""

        packdir = self.makeTempDir()
        deltachains = []

        numpacks = 10
        revisionsperpack = 100

        for i in range(numpacks):
            chain = []
            revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")

            for _ in range(revisionsperpack):
                chain.append(revision)
                revision = (
                    b'%d' % i,
                    self.getFakeHash(),
                    revision[1],
                    self.getFakeHash(),
                )

            self.createPack(chain, packdir)
            deltachains.append(chain)

        class testdatapackstore(datapack.datapackstore):
            # Ensures that we are not keeping everything in the cache.
            DEFAULTCACHESIZE = numpacks // 2

        store = testdatapackstore(uimod.ui(), packdir)

        random.shuffle(deltachains)
        for randomchain in deltachains:
            revision = random.choice(randomchain)
            chain = store.getdeltachain(revision[0], revision[1])

            mostrecentpack = next(iter(store.packs), None)
            self.assertEqual(
                mostrecentpack.getdeltachain(revision[0], revision[1]), chain
            )

            self.assertEqual(randomchain.index(revision) + 1, len(chain))

    # perf test off by default since it's slow
    def _testIndexPerf(self):
        random.seed(0)
        print("Multi-get perf test")
        packsizes = [
            100,
            10000,
            100000,
            500000,
            1000000,
            3000000,
        ]
        lookupsizes = [
            10,
            100,
            1000,
            10000,
            100000,
            1000000,
        ]
        for packsize in packsizes:
            revisions = []
            for i in pycompat.xrange(packsize):
                filename = b"filename-%d" % i
                content = b"content-%d" % i
                node = self.getHash(content)
                revisions.append((filename, node, nullid, content))

            path = self.createPack(revisions).path

            # Perf of large multi-get
            import gc

            gc.disable()
            pack = self.datapackreader(path)
            for lookupsize in lookupsizes:
                if lookupsize > packsize:
                    continue
                random.shuffle(revisions)
                findnodes = [(rev[0], rev[1]) for rev in revisions]

                start = time.time()
                pack.getmissing(findnodes[:lookupsize])
                elapsed = time.time() - start
                print(
                    "%s pack %d lookups = %0.04f"
                    % (
                        ('%d' % packsize).rjust(7),
                        ('%d' % lookupsize).rjust(7),
                        elapsed,
                    )
                )

            print("")
            gc.enable()

        # The perf test is meant to produce output, so we always fail the test
        # so the user sees the output.
        raise RuntimeError("perf test always fails")
378 386
379 387
class datapacktests(datapacktestsbase, unittest.TestCase):
    # Concrete test case binding the shared datapack test logic to the
    # datapack.datapack reader, with paramsavailable=True so the fanout
    # parameter assertions in the base class run.
    def __init__(self, *args, **kwargs):
        # Both bases are initialized explicitly (rather than via super())
        # because they take different constructor arguments.
        datapacktestsbase.__init__(self, datapack.datapack, True)
        unittest.TestCase.__init__(self, *args, **kwargs)
384 392
385 393
386 394 # TODO:
387 395 # datapack store:
388 396 # - getmissing
389 397 # - GC two packs into one
390 398
if __name__ == '__main__':
    # This test is not supported on Windows; exit code 80 marks it skipped.
    if not pycompat.iswindows:
        silenttestrunner.main(__name__)
    else:
        sys.exit(80)
General Comments 0
You need to be logged in to leave comments. Login now