##// END OF EJS Templates
persistent-nodemap: add a way to make the picked uid predictable...
marmoute -
r48091:9a3aa547 default
parent child Browse files
Show More
@@ -1,656 +1,698 b''
1 # nodemap.py - nodemap related code and utilities
1 # nodemap.py - nodemap related code and utilities
2 #
2 #
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
4 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import os
12 import os
13 import random
13 import re
14 import re
14 import struct
15 import struct
15
16
16 from ..node import hex
17 from ..node import hex
17
18
18 from .. import (
19 from .. import (
20 encoding,
19 error,
21 error,
22 pycompat,
20 util,
23 util,
21 )
24 )
22
25
23
26
24 class NodeMap(dict):
27 class NodeMap(dict):
25 def __missing__(self, x):
28 def __missing__(self, x):
26 raise error.RevlogError(b'unknown node: %s' % x)
29 raise error.RevlogError(b'unknown node: %s' % x)
27
30
28
31
29 def persisted_data(revlog):
32 def persisted_data(revlog):
30 """read the nodemap for a revlog from disk"""
33 """read the nodemap for a revlog from disk"""
31 if revlog._nodemap_file is None:
34 if revlog._nodemap_file is None:
32 return None
35 return None
33 pdata = revlog.opener.tryread(revlog._nodemap_file)
36 pdata = revlog.opener.tryread(revlog._nodemap_file)
34 if not pdata:
37 if not pdata:
35 return None
38 return None
36 offset = 0
39 offset = 0
37 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
40 (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
38 if version != ONDISK_VERSION:
41 if version != ONDISK_VERSION:
39 return None
42 return None
40 offset += S_VERSION.size
43 offset += S_VERSION.size
41 headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
44 headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
42 uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
45 uid_size, tip_rev, data_length, data_unused, tip_node_size = headers
43 offset += S_HEADER.size
46 offset += S_HEADER.size
44 docket = NodeMapDocket(pdata[offset : offset + uid_size])
47 docket = NodeMapDocket(pdata[offset : offset + uid_size])
45 offset += uid_size
48 offset += uid_size
46 docket.tip_rev = tip_rev
49 docket.tip_rev = tip_rev
47 docket.tip_node = pdata[offset : offset + tip_node_size]
50 docket.tip_node = pdata[offset : offset + tip_node_size]
48 docket.data_length = data_length
51 docket.data_length = data_length
49 docket.data_unused = data_unused
52 docket.data_unused = data_unused
50
53
51 filename = _rawdata_filepath(revlog, docket)
54 filename = _rawdata_filepath(revlog, docket)
52 use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
55 use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
53 try:
56 try:
54 with revlog.opener(filename) as fd:
57 with revlog.opener(filename) as fd:
55 if use_mmap:
58 if use_mmap:
56 try:
59 try:
57 data = util.buffer(util.mmapread(fd, data_length))
60 data = util.buffer(util.mmapread(fd, data_length))
58 except ValueError:
61 except ValueError:
59 # raised when the read file is too small
62 # raised when the read file is too small
60 data = b''
63 data = b''
61 else:
64 else:
62 data = fd.read(data_length)
65 data = fd.read(data_length)
63 except (IOError, OSError) as e:
66 except (IOError, OSError) as e:
64 if e.errno == errno.ENOENT:
67 if e.errno == errno.ENOENT:
65 return None
68 return None
66 else:
69 else:
67 raise
70 raise
68 if len(data) < data_length:
71 if len(data) < data_length:
69 return None
72 return None
70 return docket, data
73 return docket, data
71
74
72
75
73 def setup_persistent_nodemap(tr, revlog):
76 def setup_persistent_nodemap(tr, revlog):
74 """Install whatever is needed transaction side to persist a nodemap on disk
77 """Install whatever is needed transaction side to persist a nodemap on disk
75
78
76 (only actually persist the nodemap if this is relevant for this revlog)
79 (only actually persist the nodemap if this is relevant for this revlog)
77 """
80 """
78 if revlog._inline:
81 if revlog._inline:
79 return # inlined revlog are too small for this to be relevant
82 return # inlined revlog are too small for this to be relevant
80 if revlog._nodemap_file is None:
83 if revlog._nodemap_file is None:
81 return # we do not use persistent_nodemap on this revlog
84 return # we do not use persistent_nodemap on this revlog
82
85
83 # we need to happen after the changelog finalization, in that use "cl-"
86 # we need to happen after the changelog finalization, in that use "cl-"
84 callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
87 callback_id = b"nm-revlog-persistent-nodemap-%s" % revlog._nodemap_file
85 if tr.hasfinalize(callback_id):
88 if tr.hasfinalize(callback_id):
86 return # no need to register again
89 return # no need to register again
87 tr.addpending(
90 tr.addpending(
88 callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
91 callback_id, lambda tr: persist_nodemap(tr, revlog, pending=True)
89 )
92 )
90 tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
93 tr.addfinalize(callback_id, lambda tr: persist_nodemap(tr, revlog))
91
94
92
95
93 class _NoTransaction(object):
96 class _NoTransaction(object):
94 """transaction like object to update the nodemap outside a transaction"""
97 """transaction like object to update the nodemap outside a transaction"""
95
98
96 def __init__(self):
99 def __init__(self):
97 self._postclose = {}
100 self._postclose = {}
98
101
99 def addpostclose(self, callback_id, callback_func):
102 def addpostclose(self, callback_id, callback_func):
100 self._postclose[callback_id] = callback_func
103 self._postclose[callback_id] = callback_func
101
104
102 def registertmp(self, *args, **kwargs):
105 def registertmp(self, *args, **kwargs):
103 pass
106 pass
104
107
105 def addbackup(self, *args, **kwargs):
108 def addbackup(self, *args, **kwargs):
106 pass
109 pass
107
110
108 def add(self, *args, **kwargs):
111 def add(self, *args, **kwargs):
109 pass
112 pass
110
113
111 def addabort(self, *args, **kwargs):
114 def addabort(self, *args, **kwargs):
112 pass
115 pass
113
116
114 def _report(self, *args):
117 def _report(self, *args):
115 pass
118 pass
116
119
117
120
118 def update_persistent_nodemap(revlog):
121 def update_persistent_nodemap(revlog):
119 """update the persistent nodemap right now
122 """update the persistent nodemap right now
120
123
121 To be used for updating the nodemap on disk outside of a normal transaction
124 To be used for updating the nodemap on disk outside of a normal transaction
122 setup (eg, `debugupdatecache`).
125 setup (eg, `debugupdatecache`).
123 """
126 """
124 if revlog._inline:
127 if revlog._inline:
125 return # inlined revlog are too small for this to be relevant
128 return # inlined revlog are too small for this to be relevant
126 if revlog._nodemap_file is None:
129 if revlog._nodemap_file is None:
127 return # we do not use persistent_nodemap on this revlog
130 return # we do not use persistent_nodemap on this revlog
128
131
129 notr = _NoTransaction()
132 notr = _NoTransaction()
130 persist_nodemap(notr, revlog)
133 persist_nodemap(notr, revlog)
131 for k in sorted(notr._postclose):
134 for k in sorted(notr._postclose):
132 notr._postclose[k](None)
135 notr._postclose[k](None)
133
136
134
137
135 def delete_nodemap(tr, repo, revlog):
138 def delete_nodemap(tr, repo, revlog):
136 """ Delete nodemap data on disk for a given revlog"""
139 """ Delete nodemap data on disk for a given revlog"""
137 if revlog._nodemap_file is None:
140 if revlog._nodemap_file is None:
138 msg = "calling persist nodemap on a revlog without the feature enabled"
141 msg = "calling persist nodemap on a revlog without the feature enabled"
139 raise error.ProgrammingError(msg)
142 raise error.ProgrammingError(msg)
140 repo.svfs.unlink(revlog._nodemap_file)
143 repo.svfs.unlink(revlog._nodemap_file)
141
144
142
145
143 def persist_nodemap(tr, revlog, pending=False, force=False):
146 def persist_nodemap(tr, revlog, pending=False, force=False):
144 """Write nodemap data on disk for a given revlog"""
147 """Write nodemap data on disk for a given revlog"""
145 if getattr(revlog, 'filteredrevs', ()):
148 if getattr(revlog, 'filteredrevs', ()):
146 raise error.ProgrammingError(
149 raise error.ProgrammingError(
147 "cannot persist nodemap of a filtered changelog"
150 "cannot persist nodemap of a filtered changelog"
148 )
151 )
149 if revlog._nodemap_file is None:
152 if revlog._nodemap_file is None:
150 if force:
153 if force:
151 revlog._nodemap_file = get_nodemap_file(revlog)
154 revlog._nodemap_file = get_nodemap_file(revlog)
152 else:
155 else:
153 msg = "calling persist nodemap on a revlog without the feature enabled"
156 msg = "calling persist nodemap on a revlog without the feature enabled"
154 raise error.ProgrammingError(msg)
157 raise error.ProgrammingError(msg)
155
158
156 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
159 can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
157 ondisk_docket = revlog._nodemap_docket
160 ondisk_docket = revlog._nodemap_docket
158 feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
161 feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
159 use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
162 use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
160
163
161 data = None
164 data = None
162 # first attemp an incremental update of the data
165 # first attemp an incremental update of the data
163 if can_incremental and ondisk_docket is not None:
166 if can_incremental and ondisk_docket is not None:
164 target_docket = revlog._nodemap_docket.copy()
167 target_docket = revlog._nodemap_docket.copy()
165 (
168 (
166 src_docket,
169 src_docket,
167 data_changed_count,
170 data_changed_count,
168 data,
171 data,
169 ) = revlog.index.nodemap_data_incremental()
172 ) = revlog.index.nodemap_data_incremental()
170 new_length = target_docket.data_length + len(data)
173 new_length = target_docket.data_length + len(data)
171 new_unused = target_docket.data_unused + data_changed_count
174 new_unused = target_docket.data_unused + data_changed_count
172 if src_docket != target_docket:
175 if src_docket != target_docket:
173 data = None
176 data = None
174 elif new_length <= (new_unused * 10): # under 10% of unused data
177 elif new_length <= (new_unused * 10): # under 10% of unused data
175 data = None
178 data = None
176 else:
179 else:
177 datafile = _rawdata_filepath(revlog, target_docket)
180 datafile = _rawdata_filepath(revlog, target_docket)
178 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
181 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
179 # store vfs
182 # store vfs
180 tr.add(datafile, target_docket.data_length)
183 tr.add(datafile, target_docket.data_length)
181 with revlog.opener(datafile, b'r+') as fd:
184 with revlog.opener(datafile, b'r+') as fd:
182 fd.seek(target_docket.data_length)
185 fd.seek(target_docket.data_length)
183 fd.write(data)
186 fd.write(data)
184 if feed_data:
187 if feed_data:
185 if use_mmap:
188 if use_mmap:
186 fd.seek(0)
189 fd.seek(0)
187 new_data = fd.read(new_length)
190 new_data = fd.read(new_length)
188 else:
191 else:
189 fd.flush()
192 fd.flush()
190 new_data = util.buffer(util.mmapread(fd, new_length))
193 new_data = util.buffer(util.mmapread(fd, new_length))
191 target_docket.data_length = new_length
194 target_docket.data_length = new_length
192 target_docket.data_unused = new_unused
195 target_docket.data_unused = new_unused
193
196
194 if data is None:
197 if data is None:
195 # otherwise fallback to a full new export
198 # otherwise fallback to a full new export
196 target_docket = NodeMapDocket()
199 target_docket = NodeMapDocket()
197 datafile = _rawdata_filepath(revlog, target_docket)
200 datafile = _rawdata_filepath(revlog, target_docket)
198 if util.safehasattr(revlog.index, "nodemap_data_all"):
201 if util.safehasattr(revlog.index, "nodemap_data_all"):
199 data = revlog.index.nodemap_data_all()
202 data = revlog.index.nodemap_data_all()
200 else:
203 else:
201 data = persistent_data(revlog.index)
204 data = persistent_data(revlog.index)
202 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
205 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
203 # store vfs
206 # store vfs
204
207
205 tryunlink = revlog.opener.tryunlink
208 tryunlink = revlog.opener.tryunlink
206
209
207 def abortck(tr):
210 def abortck(tr):
208 tryunlink(datafile)
211 tryunlink(datafile)
209
212
210 callback_id = b"delete-%s" % datafile
213 callback_id = b"delete-%s" % datafile
211
214
212 # some flavor of the transaction abort does not cleanup new file, it
215 # some flavor of the transaction abort does not cleanup new file, it
213 # simply empty them.
216 # simply empty them.
214 tr.addabort(callback_id, abortck)
217 tr.addabort(callback_id, abortck)
215 with revlog.opener(datafile, b'w+') as fd:
218 with revlog.opener(datafile, b'w+') as fd:
216 fd.write(data)
219 fd.write(data)
217 if feed_data:
220 if feed_data:
218 if use_mmap:
221 if use_mmap:
219 new_data = data
222 new_data = data
220 else:
223 else:
221 fd.flush()
224 fd.flush()
222 new_data = util.buffer(util.mmapread(fd, len(data)))
225 new_data = util.buffer(util.mmapread(fd, len(data)))
223 target_docket.data_length = len(data)
226 target_docket.data_length = len(data)
224 target_docket.tip_rev = revlog.tiprev()
227 target_docket.tip_rev = revlog.tiprev()
225 target_docket.tip_node = revlog.node(target_docket.tip_rev)
228 target_docket.tip_node = revlog.node(target_docket.tip_rev)
226 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
229 # EXP-TODO: if this is a cache, this should use a cache vfs, not a
227 # store vfs
230 # store vfs
228 file_path = revlog._nodemap_file
231 file_path = revlog._nodemap_file
229 if pending:
232 if pending:
230 file_path += b'.a'
233 file_path += b'.a'
231 tr.registertmp(file_path)
234 tr.registertmp(file_path)
232 else:
235 else:
233 tr.addbackup(file_path)
236 tr.addbackup(file_path)
234
237
235 with revlog.opener(file_path, b'w', atomictemp=True) as fp:
238 with revlog.opener(file_path, b'w', atomictemp=True) as fp:
236 fp.write(target_docket.serialize())
239 fp.write(target_docket.serialize())
237 revlog._nodemap_docket = target_docket
240 revlog._nodemap_docket = target_docket
238 if feed_data:
241 if feed_data:
239 revlog.index.update_nodemap_data(target_docket, new_data)
242 revlog.index.update_nodemap_data(target_docket, new_data)
240
243
241 # search for old index file in all cases, some older process might have
244 # search for old index file in all cases, some older process might have
242 # left one behind.
245 # left one behind.
243 olds = _other_rawdata_filepath(revlog, target_docket)
246 olds = _other_rawdata_filepath(revlog, target_docket)
244 if olds:
247 if olds:
245 realvfs = getattr(revlog, '_realopener', revlog.opener)
248 realvfs = getattr(revlog, '_realopener', revlog.opener)
246
249
247 def cleanup(tr):
250 def cleanup(tr):
248 for oldfile in olds:
251 for oldfile in olds:
249 realvfs.tryunlink(oldfile)
252 realvfs.tryunlink(oldfile)
250
253
251 callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
254 callback_id = b"revlog-cleanup-nodemap-%s" % revlog._nodemap_file
252 tr.addpostclose(callback_id, cleanup)
255 tr.addpostclose(callback_id, cleanup)
253
256
254
257
255 ### Nodemap docket file
258 ### Nodemap docket file
256 #
259 #
257 # The nodemap data are stored on disk using 2 files:
260 # The nodemap data are stored on disk using 2 files:
258 #
261 #
259 # * a raw data files containing a persistent nodemap
262 # * a raw data files containing a persistent nodemap
260 # (see `Nodemap Trie` section)
263 # (see `Nodemap Trie` section)
261 #
264 #
262 # * a small "docket" file containing medatadata
265 # * a small "docket" file containing medatadata
263 #
266 #
264 # While the nodemap data can be multiple tens of megabytes, the "docket" is
267 # While the nodemap data can be multiple tens of megabytes, the "docket" is
265 # small, it is easy to update it automatically or to duplicated its content
268 # small, it is easy to update it automatically or to duplicated its content
266 # during a transaction.
269 # during a transaction.
267 #
270 #
268 # Multiple raw data can exist at the same time (The currently valid one and a
271 # Multiple raw data can exist at the same time (The currently valid one and a
269 # new one beind used by an in progress transaction). To accomodate this, the
272 # new one beind used by an in progress transaction). To accomodate this, the
270 # filename hosting the raw data has a variable parts. The exact filename is
273 # filename hosting the raw data has a variable parts. The exact filename is
271 # specified inside the "docket" file.
274 # specified inside the "docket" file.
272 #
275 #
273 # The docket file contains information to find, qualify and validate the raw
276 # The docket file contains information to find, qualify and validate the raw
274 # data. Its content is currently very light, but it will expand as the on disk
277 # data. Its content is currently very light, but it will expand as the on disk
275 # nodemap gains the necessary features to be used in production.
278 # nodemap gains the necessary features to be used in production.
276
279
277 ONDISK_VERSION = 1
280 ONDISK_VERSION = 1
278 S_VERSION = struct.Struct(">B")
281 S_VERSION = struct.Struct(">B")
279 S_HEADER = struct.Struct(">BQQQQ")
282 S_HEADER = struct.Struct(">BQQQQ")
280
283
281 ID_SIZE = 8
284 ID_SIZE = 8
282
285
283
286
284 def _make_uid():
287 def _make_uid():
285 """return a new unique identifier.
288 """return a new unique identifier.
286
289
287 The identifier is random and composed of ascii characters."""
290 The identifier is random and composed of ascii characters."""
288 return hex(os.urandom(ID_SIZE))
291 return hex(os.urandom(ID_SIZE))
289
292
290
293
294 # some special test logic to avoid anoying random output in the test
295 stable_docket_file = encoding.environ.get(b'HGTEST_DOCKETIDFILE')
296
297 if stable_docket_file:
298
299 def _make_uid():
300 try:
301 with open(stable_docket_file, mode='rb') as f:
302 seed = f.read().strip()
303 except IOError as inst:
304 if inst.errno != errno.ENOENT:
305 raise
306 seed = b'4' # chosen by a fair dice roll. garanteed to be random
307 if pycompat.ispy3:
308 iter_seed = iter(seed)
309 else:
310 iter_seed = (ord(c) for c in seed)
311 # some basic circular sum hashing on 64 bits
312 int_seed = 0
313 low_mask = int('1' * 35, 2)
314 for i in iter_seed:
315 high_part = int_seed >> 35
316 low_part = (int_seed & low_mask) << 28
317 int_seed = high_part + low_part + i
318 r = random.Random()
319 if pycompat.ispy3:
320 r.seed(int_seed, version=1)
321 else:
322 r.seed(int_seed)
323 # once we drop python 3.8 support we can simply use r.randbytes
324 raw = r.getrandbits(ID_SIZE * 8)
325 assert ID_SIZE == 8
326 p = struct.pack('>Q', raw)
327 new = hex(p)
328 with open(stable_docket_file, 'wb') as f:
329 f.write(new)
330 return new
331
332
291 class NodeMapDocket(object):
333 class NodeMapDocket(object):
292 """metadata associated with persistent nodemap data
334 """metadata associated with persistent nodemap data
293
335
294 The persistent data may come from disk or be on their way to disk.
336 The persistent data may come from disk or be on their way to disk.
295 """
337 """
296
338
297 def __init__(self, uid=None):
339 def __init__(self, uid=None):
298 if uid is None:
340 if uid is None:
299 uid = _make_uid()
341 uid = _make_uid()
300 # a unique identifier for the data file:
342 # a unique identifier for the data file:
301 # - When new data are appended, it is preserved.
343 # - When new data are appended, it is preserved.
302 # - When a new data file is created, a new identifier is generated.
344 # - When a new data file is created, a new identifier is generated.
303 self.uid = uid
345 self.uid = uid
304 # the tipmost revision stored in the data file. This revision and all
346 # the tipmost revision stored in the data file. This revision and all
305 # revision before it are expected to be encoded in the data file.
347 # revision before it are expected to be encoded in the data file.
306 self.tip_rev = None
348 self.tip_rev = None
307 # the node of that tipmost revision, if it mismatch the current index
349 # the node of that tipmost revision, if it mismatch the current index
308 # data the docket is not valid for the current index and should be
350 # data the docket is not valid for the current index and should be
309 # discarded.
351 # discarded.
310 #
352 #
311 # note: this method is not perfect as some destructive operation could
353 # note: this method is not perfect as some destructive operation could
312 # preserve the same tip_rev + tip_node while altering lower revision.
354 # preserve the same tip_rev + tip_node while altering lower revision.
313 # However this multiple other caches have the same vulnerability (eg:
355 # However this multiple other caches have the same vulnerability (eg:
314 # brancmap cache).
356 # brancmap cache).
315 self.tip_node = None
357 self.tip_node = None
316 # the size (in bytes) of the persisted data to encode the nodemap valid
358 # the size (in bytes) of the persisted data to encode the nodemap valid
317 # for `tip_rev`.
359 # for `tip_rev`.
318 # - data file shorter than this are corrupted,
360 # - data file shorter than this are corrupted,
319 # - any extra data should be ignored.
361 # - any extra data should be ignored.
320 self.data_length = None
362 self.data_length = None
321 # the amount (in bytes) of "dead" data, still in the data file but no
363 # the amount (in bytes) of "dead" data, still in the data file but no
322 # longer used for the nodemap.
364 # longer used for the nodemap.
323 self.data_unused = 0
365 self.data_unused = 0
324
366
325 def copy(self):
367 def copy(self):
326 new = NodeMapDocket(uid=self.uid)
368 new = NodeMapDocket(uid=self.uid)
327 new.tip_rev = self.tip_rev
369 new.tip_rev = self.tip_rev
328 new.tip_node = self.tip_node
370 new.tip_node = self.tip_node
329 new.data_length = self.data_length
371 new.data_length = self.data_length
330 new.data_unused = self.data_unused
372 new.data_unused = self.data_unused
331 return new
373 return new
332
374
333 def __cmp__(self, other):
375 def __cmp__(self, other):
334 if self.uid < other.uid:
376 if self.uid < other.uid:
335 return -1
377 return -1
336 if self.uid > other.uid:
378 if self.uid > other.uid:
337 return 1
379 return 1
338 elif self.data_length < other.data_length:
380 elif self.data_length < other.data_length:
339 return -1
381 return -1
340 elif self.data_length > other.data_length:
382 elif self.data_length > other.data_length:
341 return 1
383 return 1
342 return 0
384 return 0
343
385
344 def __eq__(self, other):
386 def __eq__(self, other):
345 return self.uid == other.uid and self.data_length == other.data_length
387 return self.uid == other.uid and self.data_length == other.data_length
346
388
347 def serialize(self):
389 def serialize(self):
348 """return serialized bytes for a docket using the passed uid"""
390 """return serialized bytes for a docket using the passed uid"""
349 data = []
391 data = []
350 data.append(S_VERSION.pack(ONDISK_VERSION))
392 data.append(S_VERSION.pack(ONDISK_VERSION))
351 headers = (
393 headers = (
352 len(self.uid),
394 len(self.uid),
353 self.tip_rev,
395 self.tip_rev,
354 self.data_length,
396 self.data_length,
355 self.data_unused,
397 self.data_unused,
356 len(self.tip_node),
398 len(self.tip_node),
357 )
399 )
358 data.append(S_HEADER.pack(*headers))
400 data.append(S_HEADER.pack(*headers))
359 data.append(self.uid)
401 data.append(self.uid)
360 data.append(self.tip_node)
402 data.append(self.tip_node)
361 return b''.join(data)
403 return b''.join(data)
362
404
363
405
364 def _rawdata_filepath(revlog, docket):
406 def _rawdata_filepath(revlog, docket):
365 """The (vfs relative) nodemap's rawdata file for a given uid"""
407 """The (vfs relative) nodemap's rawdata file for a given uid"""
366 prefix = revlog.radix
408 prefix = revlog.radix
367 return b"%s-%s.nd" % (prefix, docket.uid)
409 return b"%s-%s.nd" % (prefix, docket.uid)
368
410
369
411
370 def _other_rawdata_filepath(revlog, docket):
412 def _other_rawdata_filepath(revlog, docket):
371 prefix = revlog.radix
413 prefix = revlog.radix
372 pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
414 pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
373 new_file_path = _rawdata_filepath(revlog, docket)
415 new_file_path = _rawdata_filepath(revlog, docket)
374 new_file_name = revlog.opener.basename(new_file_path)
416 new_file_name = revlog.opener.basename(new_file_path)
375 dirpath = revlog.opener.dirname(new_file_path)
417 dirpath = revlog.opener.dirname(new_file_path)
376 others = []
418 others = []
377 for f in revlog.opener.listdir(dirpath):
419 for f in revlog.opener.listdir(dirpath):
378 if pattern.match(f) and f != new_file_name:
420 if pattern.match(f) and f != new_file_name:
379 others.append(f)
421 others.append(f)
380 return others
422 return others
381
423
382
424
383 ### Nodemap Trie
425 ### Nodemap Trie
384 #
426 #
385 # This is a simple reference implementation to compute and persist a nodemap
427 # This is a simple reference implementation to compute and persist a nodemap
386 # trie. This reference implementation is write only. The python version of this
428 # trie. This reference implementation is write only. The python version of this
387 # is not expected to be actually used, since it wont provide performance
429 # is not expected to be actually used, since it wont provide performance
388 # improvement over existing non-persistent C implementation.
430 # improvement over existing non-persistent C implementation.
389 #
431 #
390 # The nodemap is persisted as Trie using 4bits-address/16-entries block. each
432 # The nodemap is persisted as Trie using 4bits-address/16-entries block. each
391 # revision can be adressed using its node shortest prefix.
433 # revision can be adressed using its node shortest prefix.
392 #
434 #
393 # The trie is stored as a sequence of block. Each block contains 16 entries
435 # The trie is stored as a sequence of block. Each block contains 16 entries
394 # (signed 64bit integer, big endian). Each entry can be one of the following:
436 # (signed 64bit integer, big endian). Each entry can be one of the following:
395 #
437 #
396 # * value >= 0 -> index of sub-block
438 # * value >= 0 -> index of sub-block
397 # * value == -1 -> no value
439 # * value == -1 -> no value
398 # * value < -1 -> encoded revision: rev = -(value+2)
440 # * value < -1 -> encoded revision: rev = -(value+2)
399 #
441 #
400 # See REV_OFFSET and _transform_rev below.
442 # See REV_OFFSET and _transform_rev below.
401 #
443 #
402 # The implementation focus on simplicity, not on performance. A Rust
444 # The implementation focus on simplicity, not on performance. A Rust
403 # implementation should provide a efficient version of the same binary
445 # implementation should provide a efficient version of the same binary
404 # persistence. This reference python implementation is never meant to be
446 # persistence. This reference python implementation is never meant to be
405 # extensively use in production.
447 # extensively use in production.
406
448
407
449
408 def persistent_data(index):
450 def persistent_data(index):
409 """return the persistent binary form for a nodemap for a given index"""
451 """return the persistent binary form for a nodemap for a given index"""
410 trie = _build_trie(index)
452 trie = _build_trie(index)
411 return _persist_trie(trie)
453 return _persist_trie(trie)
412
454
413
455
414 def update_persistent_data(index, root, max_idx, last_rev):
456 def update_persistent_data(index, root, max_idx, last_rev):
415 """return the incremental update for persistent nodemap from a given index"""
457 """return the incremental update for persistent nodemap from a given index"""
416 changed_block, trie = _update_trie(index, root, last_rev)
458 changed_block, trie = _update_trie(index, root, last_rev)
417 return (
459 return (
418 changed_block * S_BLOCK.size,
460 changed_block * S_BLOCK.size,
419 _persist_trie(trie, existing_idx=max_idx),
461 _persist_trie(trie, existing_idx=max_idx),
420 )
462 )
421
463
422
464
423 S_BLOCK = struct.Struct(">" + ("l" * 16))
465 S_BLOCK = struct.Struct(">" + ("l" * 16))
424
466
425 NO_ENTRY = -1
467 NO_ENTRY = -1
426 # rev 0 need to be -2 because 0 is used by block, -1 is a special value.
468 # rev 0 need to be -2 because 0 is used by block, -1 is a special value.
427 REV_OFFSET = 2
469 REV_OFFSET = 2
428
470
429
471
430 def _transform_rev(rev):
472 def _transform_rev(rev):
431 """Return the number used to represent the rev in the tree.
473 """Return the number used to represent the rev in the tree.
432
474
433 (or retrieve a rev number from such representation)
475 (or retrieve a rev number from such representation)
434
476
435 Note that this is an involution, a function equal to its inverse (i.e.
477 Note that this is an involution, a function equal to its inverse (i.e.
436 which gives the identity when applied to itself).
478 which gives the identity when applied to itself).
437 """
479 """
438 return -(rev + REV_OFFSET)
480 return -(rev + REV_OFFSET)
439
481
440
482
441 def _to_int(hex_digit):
483 def _to_int(hex_digit):
442 """turn an hexadecimal digit into a proper integer"""
484 """turn an hexadecimal digit into a proper integer"""
443 return int(hex_digit, 16)
485 return int(hex_digit, 16)
444
486
445
487
446 class Block(dict):
488 class Block(dict):
447 """represent a block of the Trie
489 """represent a block of the Trie
448
490
449 contains up to 16 entry indexed from 0 to 15"""
491 contains up to 16 entry indexed from 0 to 15"""
450
492
451 def __init__(self):
493 def __init__(self):
452 super(Block, self).__init__()
494 super(Block, self).__init__()
453 # If this block exist on disk, here is its ID
495 # If this block exist on disk, here is its ID
454 self.ondisk_id = None
496 self.ondisk_id = None
455
497
456 def __iter__(self):
498 def __iter__(self):
457 return iter(self.get(i) for i in range(16))
499 return iter(self.get(i) for i in range(16))
458
500
459
501
460 def _build_trie(index):
502 def _build_trie(index):
461 """build a nodemap trie
503 """build a nodemap trie
462
504
463 The nodemap stores revision number for each unique prefix.
505 The nodemap stores revision number for each unique prefix.
464
506
465 Each block is a dictionary with keys in `[0, 15]`. Values are either
507 Each block is a dictionary with keys in `[0, 15]`. Values are either
466 another block or a revision number.
508 another block or a revision number.
467 """
509 """
468 root = Block()
510 root = Block()
469 for rev in range(len(index)):
511 for rev in range(len(index)):
470 current_hex = hex(index[rev][7])
512 current_hex = hex(index[rev][7])
471 _insert_into_block(index, 0, root, rev, current_hex)
513 _insert_into_block(index, 0, root, rev, current_hex)
472 return root
514 return root
473
515
474
516
def _update_trie(index, root, last_rev):
    """Add revisions newer than ``last_rev`` to an existing trie.

    Returns a ``(changed, root)`` pair where ``changed`` counts the
    number of block insertions performed.
    """
    changed = 0
    rev = last_rev + 1
    while rev < len(index):
        node_hex = hex(index[rev][7])
        changed += _insert_into_block(index, 0, root, rev, node_hex)
        rev += 1
    return changed, root
482
524
483
525
def _insert_into_block(index, level, block, current_rev, current_hex):
    """Insert one revision into a trie block, recursing as needed.

    index: the revlog index the revision belongs to
    level: depth of ``block`` within the trie
    block: the Block currently being updated
    current_rev: the revision number being inserted
    current_hex: full hexadecimal node of that revision

    Returns the number of block insertions performed.
    """
    changed = 1
    # any mutation invalidates the on-disk copy of this block
    if block.ondisk_id is not None:
        block.ondisk_id = None
    digit = _to_int(current_hex[level : level + 1])
    existing = block.get(digit)
    if existing is None:
        # free slot: the prefix is unique so far, store the revision
        block[digit] = current_rev
    elif isinstance(existing, dict):
        # descend into the sub-block associated with this digit
        changed += _insert_into_block(
            index, level + 1, existing, current_rev, current_hex
        )
    else:
        # the slot holds another revision whose node shares this prefix;
        # grow a new sub-block deep enough to tell the two apart
        other_rev = existing
        other_hex = hex(index[other_rev][7])
        sub = Block()
        block[digit] = sub
        _insert_into_block(index, level + 1, sub, other_rev, other_hex)
        _insert_into_block(index, level + 1, sub, current_rev, current_hex)
    return changed
516
558
517
559
def _persist_trie(root, existing_idx=None):
    """Serialize a nodemap trie into its persistent binary form.

    See `_build_trie` for the trie structure.  When ``existing_idx`` is
    given, newly written blocks are numbered after it, so the output can
    be appended to existing on-disk data.
    """
    block_map = {}
    base_idx = 0 if existing_idx is None else existing_idx + 1
    chunks = []
    for node in _walk_trie(root):
        if node.ondisk_id is not None:
            # already persisted: reuse the existing on-disk identifier
            block_map[id(node)] = node.ondisk_id
        else:
            block_map[id(node)] = base_idx + len(chunks)
            chunks.append(_persist_block(node, block_map))
    return b''.join(chunks)
535
577
536
578
537 def _walk_trie(block):
579 def _walk_trie(block):
538 """yield all the block in a trie
580 """yield all the block in a trie
539
581
540 Children blocks are always yield before their parent block.
582 Children blocks are always yield before their parent block.
541 """
583 """
542 for (__, item) in sorted(block.items()):
584 for (__, item) in sorted(block.items()):
543 if isinstance(item, dict):
585 if isinstance(item, dict):
544 for sub_block in _walk_trie(item):
586 for sub_block in _walk_trie(item):
545 yield sub_block
587 yield sub_block
546 yield block
588 yield block
547
589
548
590
def _persist_block(block_node, block_map):
    """Serialize one block into its binary form.

    Every child block must already be persisted and registered in
    ``block_map``.
    """
    values = [_to_value(entry, block_map) for entry in block_node]
    return S_BLOCK.pack(*values)
557
599
558
600
def _to_value(item, block_map):
    """Encode a block entry (None, sub-block or revision) as an integer."""
    if isinstance(item, dict):
        # reference to an already-persisted sub-block
        return block_map[id(item)]
    if item is None:
        return NO_ENTRY
    # plain revision number
    return _transform_rev(item)
567
609
568
610
def parse_data(data):
    """Parse persisted nodemap data into a nodemap trie.

    Returns a ``(root, last_block_index)`` pair.  The root of the trie
    is the last block stored on disk, since children are always written
    before their parent.
    """
    if len(data) % S_BLOCK.size != 0:
        msg = b"nodemap data size is not a multiple of block size (%d): %d"
        raise error.Abort(msg % (S_BLOCK.size, len(data)))
    if not data:
        return Block(), None
    block_map = {}
    raw_blocks = []
    # first pass: materialize every block and record its on-disk id
    offset = 0
    while offset < len(data):
        block = Block()
        block.ondisk_id = len(block_map)
        block_map[block.ondisk_id] = block
        raw_blocks.append(
            (block, S_BLOCK.unpack(data[offset : offset + S_BLOCK.size]))
        )
        offset += S_BLOCK.size
    # second pass: resolve integer entries now that all blocks exist
    for block, values in raw_blocks:
        for idx, value in enumerate(values):
            if value == NO_ENTRY:
                continue
            if value >= 0:
                # non-negative values index a child block
                block[idx] = block_map[value]
            else:
                # negative values encode a revision number
                block[idx] = _transform_rev(value)
    # ``block`` is the last block read, i.e. the root of the trie
    return block, (offset // S_BLOCK.size) - 1
594
636
595
637
596 # debug utility
638 # debug utility
597
639
598
640
def check_data(ui, index, data):
    """Verify that the provided nodemap data are valid for the given index.

    Prints a summary on ``ui`` and reports every discrepancy found.
    Returns 0 when everything matches, 1 otherwise.
    """
    ret = 0
    ui.status((b"revision in index: %d\n") % len(index))
    root, __ = parse_data(data)
    all_revs = set(_all_revisions(root))
    ui.status((b"revision in nodemap: %d\n") % len(all_revs))
    for r in range(len(index)):
        if r not in all_revs:
            msg = b" revision missing from nodemap: %d\n" % r
            ui.write_err(msg)
            ret = 1
            continue
        all_revs.remove(r)
        # make sure the stored node actually resolves back to ``r``
        nm_rev = _find_node(root, hex(index[r][7]))
        if nm_rev is None:
            msg = b" revision node does not match any entries: %d\n" % r
            ui.write_err(msg)
            ret = 1
        elif nm_rev != r:
            msg = (
                b" revision node does not match the expected revision: "
                b"%d != %d\n" % (r, nm_rev)
            )
            ui.write_err(msg)
            ret = 1

    # whatever is left in all_revs exists in the nodemap but not the index
    for r in sorted(all_revs):
        msg = b" extra revision in nodemap: %d\n" % r
        ui.write_err(msg)
        ret = 1
    return ret
632
674
633
675
def _all_revisions(root):
    """Yield every revision number stored in a trie."""
    for blk in _walk_trie(root):
        for entry in blk:
            # skip empty slots and sub-block references
            if entry is not None and not isinstance(entry, Block):
                yield entry
641
683
642
684
def _find_node(block, node):
    """Look up the revision associated with a hex node in the trie.

    Returns None when no slot matches.  The trie only stores unique
    prefixes, so a match does not guarantee the full node is equal --
    callers compare against the index themselves (see ``check_data``).
    """
    current = block
    remaining = node
    while True:
        entry = current.get(_to_int(remaining[0:1]))
        if not isinstance(entry, dict):
            return entry
        # descend one level and drop the consumed digit
        current = entry
        remaining = remaining[1:]
649
691
650
692
def get_nodemap_file(revlog):
    """Return the path of the nodemap file to use for ``revlog``.

    When pending (transaction) data should be honored, the ``.n.a``
    pending file is preferred if it exists; otherwise the final ``.n``
    file is returned.
    """
    if revlog._trypending:
        pending = revlog.radix + b".n.a"
        if revlog.opener.exists(pending):
            return pending
    return revlog.radix + b".n"
@@ -1,3932 +1,3934 b''
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import contextlib
50 import contextlib
51 import difflib
51 import difflib
52 import distutils.version as version
52 import distutils.version as version
53 import errno
53 import errno
54 import json
54 import json
55 import multiprocessing
55 import multiprocessing
56 import os
56 import os
57 import platform
57 import platform
58 import random
58 import random
59 import re
59 import re
60 import shutil
60 import shutil
61 import signal
61 import signal
62 import socket
62 import socket
63 import subprocess
63 import subprocess
64 import sys
64 import sys
65 import sysconfig
65 import sysconfig
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 import unittest
69 import unittest
70 import uuid
70 import uuid
71 import xml.dom.minidom as minidom
71 import xml.dom.minidom as minidom
72
72
73 try:
73 try:
74 import Queue as queue
74 import Queue as queue
75 except ImportError:
75 except ImportError:
76 import queue
76 import queue
77
77
78 try:
78 try:
79 import shlex
79 import shlex
80
80
81 shellquote = shlex.quote
81 shellquote = shlex.quote
82 except (ImportError, AttributeError):
82 except (ImportError, AttributeError):
83 import pipes
83 import pipes
84
84
85 shellquote = pipes.quote
85 shellquote = pipes.quote
86
86
87 processlock = threading.Lock()
87 processlock = threading.Lock()
88
88
89 pygmentspresent = False
89 pygmentspresent = False
90 try: # is pygments installed
90 try: # is pygments installed
91 import pygments
91 import pygments
92 import pygments.lexers as lexers
92 import pygments.lexers as lexers
93 import pygments.lexer as lexer
93 import pygments.lexer as lexer
94 import pygments.formatters as formatters
94 import pygments.formatters as formatters
95 import pygments.token as token
95 import pygments.token as token
96 import pygments.style as style
96 import pygments.style as style
97
97
98 if os.name == 'nt':
98 if os.name == 'nt':
99 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
99 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
100 sys.path.append(hgpath)
100 sys.path.append(hgpath)
101 try:
101 try:
102 from mercurial import win32 # pytype: disable=import-error
102 from mercurial import win32 # pytype: disable=import-error
103
103
104 # Don't check the result code because it fails on heptapod, but
104 # Don't check the result code because it fails on heptapod, but
105 # something is able to convert to color anyway.
105 # something is able to convert to color anyway.
106 win32.enablevtmode()
106 win32.enablevtmode()
107 finally:
107 finally:
108 sys.path = sys.path[:-1]
108 sys.path = sys.path[:-1]
109
109
110 pygmentspresent = True
110 pygmentspresent = True
111 difflexer = lexers.DiffLexer()
111 difflexer = lexers.DiffLexer()
112 terminal256formatter = formatters.Terminal256Formatter()
112 terminal256formatter = formatters.Terminal256Formatter()
113 except ImportError:
113 except ImportError:
114 pass
114 pass
115
115
116 if pygmentspresent:
116 if pygmentspresent:
117
117
118 class TestRunnerStyle(style.Style):
118 class TestRunnerStyle(style.Style):
119 default_style = ""
119 default_style = ""
120 skipped = token.string_to_tokentype("Token.Generic.Skipped")
120 skipped = token.string_to_tokentype("Token.Generic.Skipped")
121 failed = token.string_to_tokentype("Token.Generic.Failed")
121 failed = token.string_to_tokentype("Token.Generic.Failed")
122 skippedname = token.string_to_tokentype("Token.Generic.SName")
122 skippedname = token.string_to_tokentype("Token.Generic.SName")
123 failedname = token.string_to_tokentype("Token.Generic.FName")
123 failedname = token.string_to_tokentype("Token.Generic.FName")
124 styles = {
124 styles = {
125 skipped: '#e5e5e5',
125 skipped: '#e5e5e5',
126 skippedname: '#00ffff',
126 skippedname: '#00ffff',
127 failed: '#7f0000',
127 failed: '#7f0000',
128 failedname: '#ff0000',
128 failedname: '#ff0000',
129 }
129 }
130
130
131 class TestRunnerLexer(lexer.RegexLexer):
131 class TestRunnerLexer(lexer.RegexLexer):
132 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
132 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
133 tokens = {
133 tokens = {
134 'root': [
134 'root': [
135 (r'^Skipped', token.Generic.Skipped, 'skipped'),
135 (r'^Skipped', token.Generic.Skipped, 'skipped'),
136 (r'^Failed ', token.Generic.Failed, 'failed'),
136 (r'^Failed ', token.Generic.Failed, 'failed'),
137 (r'^ERROR: ', token.Generic.Failed, 'failed'),
137 (r'^ERROR: ', token.Generic.Failed, 'failed'),
138 ],
138 ],
139 'skipped': [
139 'skipped': [
140 (testpattern, token.Generic.SName),
140 (testpattern, token.Generic.SName),
141 (r':.*', token.Generic.Skipped),
141 (r':.*', token.Generic.Skipped),
142 ],
142 ],
143 'failed': [
143 'failed': [
144 (testpattern, token.Generic.FName),
144 (testpattern, token.Generic.FName),
145 (r'(:| ).*', token.Generic.Failed),
145 (r'(:| ).*', token.Generic.Failed),
146 ],
146 ],
147 }
147 }
148
148
149 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
149 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
150 runnerlexer = TestRunnerLexer()
150 runnerlexer = TestRunnerLexer()
151
151
152 origenviron = os.environ.copy()
152 origenviron = os.environ.copy()
153
153
154 if sys.version_info > (3, 5, 0):
154 if sys.version_info > (3, 5, 0):
155 PYTHON3 = True
155 PYTHON3 = True
156 xrange = range # we use xrange in one place, and we'd rather not use range
156 xrange = range # we use xrange in one place, and we'd rather not use range
157
157
158 def _sys2bytes(p):
158 def _sys2bytes(p):
159 if p is None:
159 if p is None:
160 return p
160 return p
161 return p.encode('utf-8')
161 return p.encode('utf-8')
162
162
163 def _bytes2sys(p):
163 def _bytes2sys(p):
164 if p is None:
164 if p is None:
165 return p
165 return p
166 return p.decode('utf-8')
166 return p.decode('utf-8')
167
167
168 osenvironb = getattr(os, 'environb', None)
168 osenvironb = getattr(os, 'environb', None)
169 if osenvironb is None:
169 if osenvironb is None:
170 # Windows lacks os.environb, for instance. A proxy over the real thing
170 # Windows lacks os.environb, for instance. A proxy over the real thing
171 # instead of a copy allows the environment to be updated via bytes on
171 # instead of a copy allows the environment to be updated via bytes on
172 # all platforms.
172 # all platforms.
173 class environbytes(object):
173 class environbytes(object):
174 def __init__(self, strenv):
174 def __init__(self, strenv):
175 self.__len__ = strenv.__len__
175 self.__len__ = strenv.__len__
176 self.clear = strenv.clear
176 self.clear = strenv.clear
177 self._strenv = strenv
177 self._strenv = strenv
178
178
179 def __getitem__(self, k):
179 def __getitem__(self, k):
180 v = self._strenv.__getitem__(_bytes2sys(k))
180 v = self._strenv.__getitem__(_bytes2sys(k))
181 return _sys2bytes(v)
181 return _sys2bytes(v)
182
182
183 def __setitem__(self, k, v):
183 def __setitem__(self, k, v):
184 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
184 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
185
185
186 def __delitem__(self, k):
186 def __delitem__(self, k):
187 self._strenv.__delitem__(_bytes2sys(k))
187 self._strenv.__delitem__(_bytes2sys(k))
188
188
189 def __contains__(self, k):
189 def __contains__(self, k):
190 return self._strenv.__contains__(_bytes2sys(k))
190 return self._strenv.__contains__(_bytes2sys(k))
191
191
192 def __iter__(self):
192 def __iter__(self):
193 return iter([_sys2bytes(k) for k in iter(self._strenv)])
193 return iter([_sys2bytes(k) for k in iter(self._strenv)])
194
194
195 def get(self, k, default=None):
195 def get(self, k, default=None):
196 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
196 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
197 return _sys2bytes(v)
197 return _sys2bytes(v)
198
198
199 def pop(self, k, default=None):
199 def pop(self, k, default=None):
200 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
200 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
201 return _sys2bytes(v)
201 return _sys2bytes(v)
202
202
203 osenvironb = environbytes(os.environ)
203 osenvironb = environbytes(os.environ)
204
204
205 getcwdb = getattr(os, 'getcwdb')
205 getcwdb = getattr(os, 'getcwdb')
206 if not getcwdb or os.name == 'nt':
206 if not getcwdb or os.name == 'nt':
207 getcwdb = lambda: _sys2bytes(os.getcwd())
207 getcwdb = lambda: _sys2bytes(os.getcwd())
208
208
209 elif sys.version_info >= (3, 0, 0):
209 elif sys.version_info >= (3, 0, 0):
210 print(
210 print(
211 '%s is only supported on Python 3.5+ and 2.7, not %s'
211 '%s is only supported on Python 3.5+ and 2.7, not %s'
212 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
212 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
213 )
213 )
214 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
214 sys.exit(70) # EX_SOFTWARE from `man 3 sysexit`
215 else:
215 else:
216 PYTHON3 = False
216 PYTHON3 = False
217
217
218 # In python 2.x, path operations are generally done using
218 # In python 2.x, path operations are generally done using
219 # bytestrings by default, so we don't have to do any extra
219 # bytestrings by default, so we don't have to do any extra
220 # fiddling there. We define the wrapper functions anyway just to
220 # fiddling there. We define the wrapper functions anyway just to
221 # help keep code consistent between platforms.
221 # help keep code consistent between platforms.
222 def _sys2bytes(p):
222 def _sys2bytes(p):
223 return p
223 return p
224
224
225 _bytes2sys = _sys2bytes
225 _bytes2sys = _sys2bytes
226 osenvironb = os.environ
226 osenvironb = os.environ
227 getcwdb = os.getcwd
227 getcwdb = os.getcwd
228
228
229 # For Windows support
229 # For Windows support
230 wifexited = getattr(os, "WIFEXITED", lambda x: False)
230 wifexited = getattr(os, "WIFEXITED", lambda x: False)
231
231
232 # Whether to use IPv6
232 # Whether to use IPv6
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        # this platform does not define the requested family at all
        return False
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        try:
            s.bind(('localhost', port))
        finally:
            # previous version leaked the socket when bind() raised
            s.close()
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            # port already taken, but binding with this family works
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    # NOTE: a dead ``else: return False`` clause on the try block was
    # removed -- the try suite always either returns or raises, so the
    # else branch could never execute.
256
256
257
257
258 # useipv6 will be set by parseargs
258 # useipv6 will be set by parseargs
259 useipv6 = None
259 useipv6 = None
260
260
261
261
def checkportisavailable(port):
    """return true if a port seems free to bind on localhost"""
    family = socket.AF_INET6 if useipv6 else socket.AF_INET
    try:
        with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
            s.bind(('localhost', port))
        return True
    except socket.error as exc:
        if os.name == 'nt' and exc.errno == errno.WSAEACCES:
            return False
        if PYTHON3:
            # TODO: make a proper exception handler after dropping py2. This
            # works because socket.error is an alias for OSError on py3,
            # which is also the baseclass of PermissionError.
            if isinstance(exc, PermissionError):
                return False
            if exc.errno not in (
                errno.EADDRINUSE,
                errno.EADDRNOTAVAIL,
                errno.EPROTONOSUPPORT,
            ):
                raise
        # expected "port not usable" errors (and any py2 socket.error)
        return False
288
288
289
289
290 closefds = os.name == 'posix'
290 closefds = os.name == 'posix'
291
291
292
292
def Popen4(cmd, wd, timeout, env=None):
    """Run ``cmd`` through a shell in directory ``wd`` and return the Popen.

    stderr is redirected into stdout, and ``p.fromchild``/``p.tochild``/
    ``p.childerr`` aliases are attached for popen2-style callers.  When
    ``timeout`` is non-zero, a watchdog thread terminates the process
    once the limit is exceeded and flags it via ``p.timeout``.
    """
    # hold the lock only while spawning; using ``with`` guarantees the
    # lock is released even if Popen raises (the previous explicit
    # acquire()/release() pair leaked the lock on exception)
    with processlock:
        p = subprocess.Popen(
            _bytes2sys(cmd),
            shell=True,
            bufsize=-1,
            cwd=_bytes2sys(wd),
            env=env,
            close_fds=closefds,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )

    # popen2 compatibility aliases
    p.fromchild = p.stdout
    p.tochild = p.stdin
    p.childerr = p.stderr

    p.timeout = False
    if timeout:

        def t():
            # poll until the deadline passes or the process finishes
            start = time.time()
            while time.time() - start < timeout and p.returncode is None:
                time.sleep(0.1)
            p.timeout = True
            vlog('# Timout reached for process %d' % p.pid)
            if p.returncode is None:
                terminate(p)

        threading.Thread(target=t).start()

    return p
327
327
328
328
329 if sys.executable:
329 if sys.executable:
330 sysexecutable = sys.executable
330 sysexecutable = sys.executable
331 elif os.environ.get('PYTHONEXECUTABLE'):
331 elif os.environ.get('PYTHONEXECUTABLE'):
332 sysexecutable = os.environ['PYTHONEXECUTABLE']
332 sysexecutable = os.environ['PYTHONEXECUTABLE']
333 elif os.environ.get('PYTHON'):
333 elif os.environ.get('PYTHON'):
334 sysexecutable = os.environ['PYTHON']
334 sysexecutable = os.environ['PYTHON']
335 else:
335 else:
336 raise AssertionError('Could not find Python interpreter')
336 raise AssertionError('Could not find Python interpreter')
337
337
338 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
338 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
339 IMPL_PATH = b'PYTHONPATH'
339 IMPL_PATH = b'PYTHONPATH'
340 if 'java' in sys.platform:
340 if 'java' in sys.platform:
341 IMPL_PATH = b'JYTHONPATH'
341 IMPL_PATH = b'JYTHONPATH'
342
342
343 default_defaults = {
343 default_defaults = {
344 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
344 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
345 'timeout': ('HGTEST_TIMEOUT', 360),
345 'timeout': ('HGTEST_TIMEOUT', 360),
346 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
346 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
347 'port': ('HGTEST_PORT', 20059),
347 'port': ('HGTEST_PORT', 20059),
348 'shell': ('HGTEST_SHELL', 'sh'),
348 'shell': ('HGTEST_SHELL', 'sh'),
349 }
349 }
350
350
351 defaults = default_defaults.copy()
351 defaults = default_defaults.copy()
352
352
353
353
def canonpath(path):
    """Return ``path`` with ``~`` expanded and symlinks resolved."""
    expanded = os.path.expanduser(path)
    return os.path.realpath(expanded)
356
356
357
357
def parselistfiles(files, listtype, warn=True):
    """Parse blacklist/whitelist files into a ``{path: filename}`` dict.

    Each line may carry a ``#`` comment; blank lines are skipped.
    Missing files are silently ignored (with a warning when ``warn``
    is set); any other I/O error propagates.
    """
    entries = dict()
    for filename in files:
        try:
            path = os.path.expanduser(os.path.expandvars(filename))
            f = open(path, "rb")
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            if warn:
                print("warning: no such %s file: %s" % (listtype, filename))
            continue

        # ``with`` guarantees the file is closed even when a read fails
        # (the previous version leaked the handle on exception); iterating
        # the file also avoids materializing every line at once
        with f:
            for line in f:
                line = line.split(b'#', 1)[0].strip()
                if line:
                    # Ensure path entries are compatible with os.path.relpath()
                    entries[os.path.normpath(line)] = filename
    return entries
379
379
380
380
def parsettestcases(path):
    """Collect the ``#testcases`` declarations from a .t test file.

    Returns a list containing one sorted list of case names (bytes) per
    declaration line. A missing file yields an empty list; any other
    I/O error propagates.
    """
    found = []
    try:
        with open(path, 'rb') as fp:
            for raw in fp:
                if raw.startswith(b'#testcases '):
                    found.append(sorted(raw[11:].split()))
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
    return found
396
396
397
397
def getparser():
    """Obtain the OptionParser used by the CLI.

    Builds the argparse parser with four option groups (selection, harness
    behavior, Mercurial configuration, results reporting), then resolves the
    module-level ``defaults`` table (environment overrides win) and installs
    the resolved values as parser defaults.
    """
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')

    # --- which tests to run ---
    selection = parser.add_argument_group('Test Selection')
    selection.add_argument(
        '--allow-slow-tests',
        action='store_true',
        help='allow extremely slow tests',
    )
    selection.add_argument(
        "--blacklist",
        action="append",
        help="skip tests listed in the specified blacklist file",
    )
    selection.add_argument(
        "--changed",
        help="run tests that are changed in parent rev or working directory",
    )
    selection.add_argument(
        "-k", "--keywords", help="run tests matching keywords"
    )
    selection.add_argument(
        "-r", "--retest", action="store_true", help="retest failed tests"
    )
    selection.add_argument(
        "--test-list",
        action="append",
        help="read tests to run from the specified file",
    )
    selection.add_argument(
        "--whitelist",
        action="append",
        help="always run tests listed in the specified whitelist file",
    )
    selection.add_argument(
        'tests', metavar='TESTS', nargs='*', help='Tests to run'
    )

    # --- how the harness behaves while running ---
    harness = parser.add_argument_group('Test Harness Behavior')
    harness.add_argument(
        '--bisect-repo',
        metavar='bisect_repo',
        help=(
            "Path of a repo to bisect. Use together with " "--known-good-rev"
        ),
    )
    harness.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="debug mode: write output of test scripts to console"
        " rather than capturing and diffing it (disables timeout)",
    )
    harness.add_argument(
        "-f",
        "--first",
        action="store_true",
        help="exit on the first test failure",
    )
    harness.add_argument(
        "-i",
        "--interactive",
        action="store_true",
        help="prompt to accept changed output",
    )
    harness.add_argument(
        "-j",
        "--jobs",
        type=int,
        help="number of jobs to run in parallel"
        " (default: $%s or %d)" % defaults['jobs'],
    )
    harness.add_argument(
        "--keep-tmpdir",
        action="store_true",
        help="keep temporary directory after running tests",
    )
    harness.add_argument(
        '--known-good-rev',
        metavar="known_good_rev",
        help=(
            "Automatically bisect any failures using this "
            "revision as a known-good revision."
        ),
    )
    harness.add_argument(
        "--list-tests",
        action="store_true",
        help="list tests instead of running them",
    )
    harness.add_argument(
        "--loop", action="store_true", help="loop tests repeatedly"
    )
    harness.add_argument(
        '--random', action="store_true", help='run tests in random order'
    )
    harness.add_argument(
        '--order-by-runtime',
        action="store_true",
        help='run slowest tests first, according to .testtimes',
    )
    harness.add_argument(
        "-p",
        "--port",
        type=int,
        help="port on which servers should listen"
        " (default: $%s or %d)" % defaults['port'],
    )
    harness.add_argument(
        '--profile-runner',
        action='store_true',
        help='run statprof on run-tests',
    )
    harness.add_argument(
        "-R", "--restart", action="store_true", help="restart at last error"
    )
    harness.add_argument(
        "--runs-per-test",
        type=int,
        dest="runs_per_test",
        help="run each test N times (default=1)",
        default=1,
    )
    harness.add_argument(
        "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
    )
    harness.add_argument(
        '--showchannels', action='store_true', help='show scheduling channels'
    )
    harness.add_argument(
        "--slowtimeout",
        type=int,
        help="kill errant slow tests after SLOWTIMEOUT seconds"
        " (default: $%s or %d)" % defaults['slowtimeout'],
    )
    harness.add_argument(
        "-t",
        "--timeout",
        type=int,
        help="kill errant tests after TIMEOUT seconds"
        " (default: $%s or %d)" % defaults['timeout'],
    )
    harness.add_argument(
        "--tmpdir",
        help="run tests in the given temporary directory"
        " (implies --keep-tmpdir)",
    )
    harness.add_argument(
        "-v", "--verbose", action="store_true", help="output verbose messages"
    )

    # --- which hg binary/flavour is under test ---
    hgconf = parser.add_argument_group('Mercurial Configuration')
    hgconf.add_argument(
        "--chg",
        action="store_true",
        help="install and use chg wrapper in place of hg",
    )
    hgconf.add_argument(
        "--chg-debug",
        action="store_true",
        help="show chg debug logs",
    )
    hgconf.add_argument(
        "--rhg",
        action="store_true",
        help="install and use rhg Rust implementation in place of hg",
    )
    hgconf.add_argument("--compiler", help="compiler to build with")
    hgconf.add_argument(
        '--extra-config-opt',
        action="append",
        default=[],
        help='set the given config opt in the test hgrc',
    )
    hgconf.add_argument(
        "-l",
        "--local",
        action="store_true",
        help="shortcut for --with-hg=<testdir>/../hg, "
        "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
        "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
    )
    hgconf.add_argument(
        "--ipv6",
        action="store_true",
        help="prefer IPv6 to IPv4 for network related tests",
    )
    hgconf.add_argument(
        "--pure",
        action="store_true",
        help="use pure Python code instead of C extensions",
    )
    hgconf.add_argument(
        "--rust",
        action="store_true",
        help="use Rust code alongside C extensions",
    )
    hgconf.add_argument(
        "--no-rust",
        action="store_true",
        help="do not use Rust code even if compiled",
    )
    hgconf.add_argument(
        "--with-chg",
        metavar="CHG",
        help="use specified chg wrapper in place of hg",
    )
    hgconf.add_argument(
        "--with-rhg",
        metavar="RHG",
        help="use specified rhg Rust implementation in place of hg",
    )
    hgconf.add_argument(
        "--with-hg",
        metavar="HG",
        help="test using specified hg script rather than a "
        "temporary installation",
    )

    # --- how results are reported ---
    reporting = parser.add_argument_group('Results Reporting')
    reporting.add_argument(
        "-C",
        "--annotate",
        action="store_true",
        help="output files annotated with coverage",
    )
    reporting.add_argument(
        "--color",
        choices=["always", "auto", "never"],
        default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
        help="colorisation: always|auto|never (default: auto)",
    )
    reporting.add_argument(
        "-c",
        "--cover",
        action="store_true",
        help="print a test coverage report",
    )
    reporting.add_argument(
        '--exceptions',
        action='store_true',
        help='log all exceptions and generate an exception report',
    )
    reporting.add_argument(
        "-H",
        "--htmlcov",
        action="store_true",
        help="create an HTML report of the coverage of the files",
    )
    reporting.add_argument(
        "--json",
        action="store_true",
        help="store test result data in 'report.json' file",
    )
    reporting.add_argument(
        "--outputdir",
        help="directory to write error logs to (default=test directory)",
    )
    reporting.add_argument(
        "-n", "--nodiff", action="store_true", help="skip showing test changes"
    )
    reporting.add_argument(
        "-S",
        "--noskips",
        action="store_true",
        help="don't report skip tests verbosely",
    )
    reporting.add_argument(
        "--time", action="store_true", help="time how long each test takes"
    )
    reporting.add_argument("--view", help="external diff viewer")
    reporting.add_argument(
        "--xunit", help="record xunit results at specified path"
    )

    # Resolve each default: the environment variable wins when set, coerced
    # to the type of the hard-coded fallback. NOTE: this mutates the
    # module-level ``defaults`` dict in place (pair -> scalar).
    for option, (envvar, default) in defaults.items():
        defaults[option] = type(default)(os.environ.get(envvar, default))
    parser.set_defaults(**defaults)

    return parser
679
679
680
680
def parseargs(args, parser):
    """Parse arguments with our OptionParser and validate results.

    Validates option combinations (pure/rust, chg/rhg/with-hg, coverage,
    debug), resolves --local and --with-* binaries to canonical paths, and
    sets the module-level globals ``useipv6`` and ``verbose`` as side
    effects. Returns the validated options namespace; invalid combinations
    exit via parser.error().
    """
    options = parser.parse_args(args)

    # jython is always pure
    if 'java' in sys.platform or '__pypy__' in sys.modules:
        options.pure = True

    if platform.python_implementation() != 'CPython' and options.rust:
        parser.error('Rust extensions are only available with CPython')

    if options.pure and options.rust:
        parser.error('--rust cannot be used with --pure')

    if options.rust and options.no_rust:
        parser.error('--rust cannot be used with --no-rust')

    if options.local:
        if options.with_hg or options.with_rhg or options.with_chg:
            parser.error(
                '--local cannot be used with --with-hg or --with-rhg or --with-chg'
            )
        # Resolve binaries relative to the repo root (parent of the test dir).
        testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
        reporootdir = os.path.dirname(testdir)
        pathandattrs = [(b'hg', 'with_hg')]
        if options.chg:
            pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
        if options.rhg:
            pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
        for relpath, attr in pathandattrs:
            binpath = os.path.join(reporootdir, relpath)
            # X_OK check is skipped on Windows, where it is unreliable.
            if os.name != 'nt' and not os.access(binpath, os.X_OK):
                parser.error(
                    '--local specified, but %r not found or '
                    'not executable' % binpath
                )
            setattr(options, attr, _bytes2sys(binpath))

    if options.with_hg:
        options.with_hg = canonpath(_sys2bytes(options.with_hg))
        if not (
            os.path.isfile(options.with_hg)
            and os.access(options.with_hg, os.X_OK)
        ):
            parser.error('--with-hg must specify an executable hg script')
        if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
            sys.stderr.write('warning: --with-hg should specify an hg script\n')
            sys.stderr.flush()

    if (options.chg or options.with_chg) and os.name == 'nt':
        parser.error('chg does not work on %s' % os.name)
    if (options.rhg or options.with_rhg) and os.name == 'nt':
        parser.error('rhg does not work on %s' % os.name)
    if options.with_chg:
        options.chg = False  # no installation to temporary location
        options.with_chg = canonpath(_sys2bytes(options.with_chg))
        if not (
            os.path.isfile(options.with_chg)
            and os.access(options.with_chg, os.X_OK)
        ):
            parser.error('--with-chg must specify a chg executable')
    if options.with_rhg:
        options.rhg = False  # no installation to temporary location
        options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
        if not (
            os.path.isfile(options.with_rhg)
            and os.access(options.with_rhg, os.X_OK)
        ):
            parser.error('--with-rhg must specify a rhg executable')
    if options.chg and options.with_hg:
        # chg shares installation location with hg
        parser.error(
            '--chg does not work when --with-hg is specified '
            '(use --with-chg instead)'
        )
    if options.rhg and options.with_hg:
        # rhg shares installation location with hg
        parser.error(
            '--rhg does not work when --with-hg is specified '
            '(use --with-rhg instead)'
        )
    if options.rhg and options.chg:
        parser.error('--rhg and --chg do not work together')

    if options.color == 'always' and not pygmentspresent:
        sys.stderr.write(
            'warning: --color=always ignored because '
            'pygments is not installed\n'
        )

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
            'AF_INET6'
        )

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage

            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error(
            "sorry, coverage options do not work when --local " "is specified"
        )

    if options.anycoverage and options.with_hg:
        parser.error(
            "sorry, coverage options do not work when --with-hg " "is specified"
        )

    global verbose
    if options.verbose:
        # The empty string is truthy-for-vlog's purposes via ``is False``
        # checks elsewhere; it doubles as the log-line prefix.
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        # --debug disables all timeouts; warn if the user tried to set one.
        if options.timeout != defaults['timeout']:
            sys.stderr.write('warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n'
            )
        options.timeout = 0
        options.slowtimeout = 0

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options
837
837
838
838
def rename(src, dst):
    """Move *src* to *dst*, allowing an existing destination.

    Unlike os.rename() this is a copy-then-delete: it is not atomic and
    does not upset processes holding *src* open, but it tolerates *dst*
    already existing (e.g. on Windows).
    """
    shutil.copy(src, dst)
    os.remove(src)
845
845
846
846
def makecleanable(path):
    """Best-effort chmod u+rwx on every directory below *path*.

    Walks the tree top-down so a just-fixed parent becomes traversable
    before its children are visited; chmod failures are ignored. Files and
    *path* itself are left untouched.
    """
    for root, subdirs, _files in os.walk(path, topdown=True):
        for name in subdirs:
            full = os.path.join(root, name)
            try:
                mode = os.stat(full).st_mode
                os.chmod(full, mode & 0o777 | 0o700)  # chmod u+rwx
            except OSError:
                pass
857
857
858
858
# difflib.unified_diff only accepts str; the harness compares bytes, so on
# Python 3 wrap it with difflib.diff_bytes to get a bytes-capable variant.
_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools

    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)
864
864
865
865
def getdiff(expected, output, ref, err):
    """Diff *expected* against *output* (lists of bytes lines).

    Returns ``(servefail, lines)`` where *lines* is the unified diff with
    normalized ``+++``/``---`` headers (backslashes flipped to slashes,
    trailing space stripped) and *servefail* flags a failed-to-start
    child-process abort in the output.
    """
    servefail = False
    lines = []
    for raw in _unified_diff(expected, output, ref, err):
        if raw.startswith((b'+++', b'---')):
            # Normalize header paths so diffs are platform-independent.
            raw = raw.replace(b'\\', b'/')
            if raw.endswith(b' \n'):
                raw = raw[:-2] + b'\n'
        lines.append(raw)
        if not servefail and raw.startswith(
            b'+ abort: child process failed to start'
        ):
            servefail = True

    return servefail, lines
881
881
882
882
# Verbosity state shared by log()/vlog(): False means quiet; parseargs()
# replaces it with a string prefix ('' when --verbose) — hence the
# ``verbose is False`` identity test in vlog() rather than truthiness.
verbose = False
884
884
885
885
def vlog(*msg):
    """Forward *msg* to log() only when verbose mode is enabled.

    Verbosity is signalled by the module-level ``verbose`` being anything
    other than the literal False (it may be the empty string), so an
    identity check is required rather than truthiness.
    """
    if verbose is not False:
        return log(*msg)
892
892
893
893
# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceeding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(br'(.*) \((.+?) !\)\n$')


def cdatasafe(data):
    """Return *data* (bytes) made safe for an XML CDATA section.

    Illegal control bytes become ``?`` and any ``]]>`` terminator gets a
    space inserted (``] ]>``) so it cannot end the CDATA block early.
    """
    scrubbed = CDATA_EVIL.sub(b'?', data)
    return scrubbed.replace(b']]>', b'] ]>')
914
914
915
915
def log(*msg):
    """Print the *msg* items to stdout, space-separated, on one line.

    Output is serialized through the global io lock; when verbose mode is
    active the (string) verbose prefix is printed first, and stdout is
    flushed so interleaved child output stays ordered.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for part in msg:
            print(part, end=' ')
        print()
        sys.stdout.flush()
928
928
929
929
def highlightdiff(line, color):
    """Return *line* (bytes) colorized as a diff, or unchanged when color is off."""
    if not color:
        return line
    assert pygmentspresent
    # pygments works on text, so round-trip through latin1 (lossless for bytes).
    text = line.decode('latin1')
    colored = pygments.highlight(text, difflexer, terminal256formatter)
    return colored.encode('latin1')
937
937
938
938
def highlightmsg(msg, color):
    """Return *msg* run through the runner lexer/formatter when color is on."""
    if color:
        assert pygmentspresent
        return pygments.highlight(msg, runnerlexer, runnerformatter)
    return msg
944
944
945
945
def terminate(proc):
    """Ask subprocess *proc* to terminate, swallowing OSError.

    The process may already be gone (or be unkillable on some platforms),
    so a failed terminate() is deliberately ignored.
    """
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        pass
953
953
954
954
def killdaemons(pidfile):
    """Kill every daemon whose pid is recorded in *pidfile*.

    Delegates to the killdaemons helper module (imported lazily to avoid
    a hard dependency at module load time); the pid file is removed
    afterwards and progress is reported through vlog.
    """
    import killdaemons as killmod

    return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)
959
959
960
960
961 class Test(unittest.TestCase):
961 class Test(unittest.TestCase):
962 """Encapsulates a single, runnable test.
962 """Encapsulates a single, runnable test.
963
963
964 While this class conforms to the unittest.TestCase API, it differs in that
964 While this class conforms to the unittest.TestCase API, it differs in that
965 instances need to be instantiated manually. (Typically, unittest.TestCase
965 instances need to be instantiated manually. (Typically, unittest.TestCase
966 classes are instantiated automatically by scanning modules.)
966 classes are instantiated automatically by scanning modules.)
967 """
967 """
968
968
969 # Status code reserved for skipped tests (used by hghave).
969 # Status code reserved for skipped tests (used by hghave).
970 SKIPPED_STATUS = 80
970 SKIPPED_STATUS = 80
971
971
972 def __init__(
972 def __init__(
973 self,
973 self,
974 path,
974 path,
975 outputdir,
975 outputdir,
976 tmpdir,
976 tmpdir,
977 keeptmpdir=False,
977 keeptmpdir=False,
978 debug=False,
978 debug=False,
979 first=False,
979 first=False,
980 timeout=None,
980 timeout=None,
981 startport=None,
981 startport=None,
982 extraconfigopts=None,
982 extraconfigopts=None,
983 shell=None,
983 shell=None,
984 hgcommand=None,
984 hgcommand=None,
985 slowtimeout=None,
985 slowtimeout=None,
986 usechg=False,
986 usechg=False,
987 chgdebug=False,
987 chgdebug=False,
988 useipv6=False,
988 useipv6=False,
989 ):
989 ):
990 """Create a test from parameters.
990 """Create a test from parameters.
991
991
992 path is the full path to the file defining the test.
992 path is the full path to the file defining the test.
993
993
994 tmpdir is the main temporary directory to use for this test.
994 tmpdir is the main temporary directory to use for this test.
995
995
996 keeptmpdir determines whether to keep the test's temporary directory
996 keeptmpdir determines whether to keep the test's temporary directory
997 after execution. It defaults to removal (False).
997 after execution. It defaults to removal (False).
998
998
999 debug mode will make the test execute verbosely, with unfiltered
999 debug mode will make the test execute verbosely, with unfiltered
1000 output.
1000 output.
1001
1001
1002 timeout controls the maximum run time of the test. It is ignored when
1002 timeout controls the maximum run time of the test. It is ignored when
1003 debug is True. See slowtimeout for tests with #require slow.
1003 debug is True. See slowtimeout for tests with #require slow.
1004
1004
1005 slowtimeout overrides timeout if the test has #require slow.
1005 slowtimeout overrides timeout if the test has #require slow.
1006
1006
1007 startport controls the starting port number to use for this test. Each
1007 startport controls the starting port number to use for this test. Each
1008 test will reserve 3 port numbers for execution. It is the caller's
1008 test will reserve 3 port numbers for execution. It is the caller's
1009 responsibility to allocate a non-overlapping port range to Test
1009 responsibility to allocate a non-overlapping port range to Test
1010 instances.
1010 instances.
1011
1011
1012 extraconfigopts is an iterable of extra hgrc config options. Values
1012 extraconfigopts is an iterable of extra hgrc config options. Values
1013 must have the form "key=value" (something understood by hgrc). Values
1013 must have the form "key=value" (something understood by hgrc). Values
1014 of the form "foo.key=value" will result in "[foo] key=value".
1014 of the form "foo.key=value" will result in "[foo] key=value".
1015
1015
1016 shell is the shell to execute tests in.
1016 shell is the shell to execute tests in.
1017 """
1017 """
1018 if timeout is None:
1018 if timeout is None:
1019 timeout = defaults['timeout']
1019 timeout = defaults['timeout']
1020 if startport is None:
1020 if startport is None:
1021 startport = defaults['port']
1021 startport = defaults['port']
1022 if slowtimeout is None:
1022 if slowtimeout is None:
1023 slowtimeout = defaults['slowtimeout']
1023 slowtimeout = defaults['slowtimeout']
1024 self.path = path
1024 self.path = path
1025 self.relpath = os.path.relpath(path)
1025 self.relpath = os.path.relpath(path)
1026 self.bname = os.path.basename(path)
1026 self.bname = os.path.basename(path)
1027 self.name = _bytes2sys(self.bname)
1027 self.name = _bytes2sys(self.bname)
1028 self._testdir = os.path.dirname(path)
1028 self._testdir = os.path.dirname(path)
1029 self._outputdir = outputdir
1029 self._outputdir = outputdir
1030 self._tmpname = os.path.basename(path)
1030 self._tmpname = os.path.basename(path)
1031 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1031 self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)
1032
1032
1033 self._threadtmp = tmpdir
1033 self._threadtmp = tmpdir
1034 self._keeptmpdir = keeptmpdir
1034 self._keeptmpdir = keeptmpdir
1035 self._debug = debug
1035 self._debug = debug
1036 self._first = first
1036 self._first = first
1037 self._timeout = timeout
1037 self._timeout = timeout
1038 self._slowtimeout = slowtimeout
1038 self._slowtimeout = slowtimeout
1039 self._startport = startport
1039 self._startport = startport
1040 self._extraconfigopts = extraconfigopts or []
1040 self._extraconfigopts = extraconfigopts or []
1041 self._shell = _sys2bytes(shell)
1041 self._shell = _sys2bytes(shell)
1042 self._hgcommand = hgcommand or b'hg'
1042 self._hgcommand = hgcommand or b'hg'
1043 self._usechg = usechg
1043 self._usechg = usechg
1044 self._chgdebug = chgdebug
1044 self._chgdebug = chgdebug
1045 self._useipv6 = useipv6
1045 self._useipv6 = useipv6
1046
1046
1047 self._aborted = False
1047 self._aborted = False
1048 self._daemonpids = []
1048 self._daemonpids = []
1049 self._finished = None
1049 self._finished = None
1050 self._ret = None
1050 self._ret = None
1051 self._out = None
1051 self._out = None
1052 self._skipped = None
1052 self._skipped = None
1053 self._testtmp = None
1053 self._testtmp = None
1054 self._chgsockdir = None
1054 self._chgsockdir = None
1055
1055
1056 self._refout = self.readrefout()
1056 self._refout = self.readrefout()
1057
1057
1058 def readrefout(self):
1058 def readrefout(self):
1059 """read reference output"""
1059 """read reference output"""
1060 # If we're not in --debug mode and reference output file exists,
1060 # If we're not in --debug mode and reference output file exists,
1061 # check test output against it.
1061 # check test output against it.
1062 if self._debug:
1062 if self._debug:
1063 return None # to match "out is None"
1063 return None # to match "out is None"
1064 elif os.path.exists(self.refpath):
1064 elif os.path.exists(self.refpath):
1065 with open(self.refpath, 'rb') as f:
1065 with open(self.refpath, 'rb') as f:
1066 return f.read().splitlines(True)
1066 return f.read().splitlines(True)
1067 else:
1067 else:
1068 return []
1068 return []
1069
1069
1070 # needed to get base class __repr__ running
1070 # needed to get base class __repr__ running
1071 @property
1071 @property
1072 def _testMethodName(self):
1072 def _testMethodName(self):
1073 return self.name
1073 return self.name
1074
1074
1075 def __str__(self):
1075 def __str__(self):
1076 return self.name
1076 return self.name
1077
1077
1078 def shortDescription(self):
1078 def shortDescription(self):
1079 return self.name
1079 return self.name
1080
1080
1081 def setUp(self):
1081 def setUp(self):
1082 """Tasks to perform before run()."""
1082 """Tasks to perform before run()."""
1083 self._finished = False
1083 self._finished = False
1084 self._ret = None
1084 self._ret = None
1085 self._out = None
1085 self._out = None
1086 self._skipped = None
1086 self._skipped = None
1087
1087
1088 try:
1088 try:
1089 os.mkdir(self._threadtmp)
1089 os.mkdir(self._threadtmp)
1090 except OSError as e:
1090 except OSError as e:
1091 if e.errno != errno.EEXIST:
1091 if e.errno != errno.EEXIST:
1092 raise
1092 raise
1093
1093
1094 name = self._tmpname
1094 name = self._tmpname
1095 self._testtmp = os.path.join(self._threadtmp, name)
1095 self._testtmp = os.path.join(self._threadtmp, name)
1096 os.mkdir(self._testtmp)
1096 os.mkdir(self._testtmp)
1097
1097
1098 # Remove any previous output files.
1098 # Remove any previous output files.
1099 if os.path.exists(self.errpath):
1099 if os.path.exists(self.errpath):
1100 try:
1100 try:
1101 os.remove(self.errpath)
1101 os.remove(self.errpath)
1102 except OSError as e:
1102 except OSError as e:
1103 # We might have raced another test to clean up a .err
1103 # We might have raced another test to clean up a .err
1104 # file, so ignore ENOENT when removing a previous .err
1104 # file, so ignore ENOENT when removing a previous .err
1105 # file.
1105 # file.
1106 if e.errno != errno.ENOENT:
1106 if e.errno != errno.ENOENT:
1107 raise
1107 raise
1108
1108
1109 if self._usechg:
1109 if self._usechg:
1110 self._chgsockdir = os.path.join(
1110 self._chgsockdir = os.path.join(
1111 self._threadtmp, b'%s.chgsock' % name
1111 self._threadtmp, b'%s.chgsock' % name
1112 )
1112 )
1113 os.mkdir(self._chgsockdir)
1113 os.mkdir(self._chgsockdir)
1114
1114
1115 def run(self, result):
1115 def run(self, result):
1116 """Run this test and report results against a TestResult instance."""
1116 """Run this test and report results against a TestResult instance."""
1117 # This function is extremely similar to unittest.TestCase.run(). Once
1117 # This function is extremely similar to unittest.TestCase.run(). Once
1118 # we require Python 2.7 (or at least its version of unittest), this
1118 # we require Python 2.7 (or at least its version of unittest), this
1119 # function can largely go away.
1119 # function can largely go away.
1120 self._result = result
1120 self._result = result
1121 result.startTest(self)
1121 result.startTest(self)
1122 try:
1122 try:
1123 try:
1123 try:
1124 self.setUp()
1124 self.setUp()
1125 except (KeyboardInterrupt, SystemExit):
1125 except (KeyboardInterrupt, SystemExit):
1126 self._aborted = True
1126 self._aborted = True
1127 raise
1127 raise
1128 except Exception:
1128 except Exception:
1129 result.addError(self, sys.exc_info())
1129 result.addError(self, sys.exc_info())
1130 return
1130 return
1131
1131
1132 success = False
1132 success = False
1133 try:
1133 try:
1134 self.runTest()
1134 self.runTest()
1135 except KeyboardInterrupt:
1135 except KeyboardInterrupt:
1136 self._aborted = True
1136 self._aborted = True
1137 raise
1137 raise
1138 except unittest.SkipTest as e:
1138 except unittest.SkipTest as e:
1139 result.addSkip(self, str(e))
1139 result.addSkip(self, str(e))
1140 # The base class will have already counted this as a
1140 # The base class will have already counted this as a
1141 # test we "ran", but we want to exclude skipped tests
1141 # test we "ran", but we want to exclude skipped tests
1142 # from those we count towards those run.
1142 # from those we count towards those run.
1143 result.testsRun -= 1
1143 result.testsRun -= 1
1144 except self.failureException as e:
1144 except self.failureException as e:
1145 # This differs from unittest in that we don't capture
1145 # This differs from unittest in that we don't capture
1146 # the stack trace. This is for historical reasons and
1146 # the stack trace. This is for historical reasons and
1147 # this decision could be revisited in the future,
1147 # this decision could be revisited in the future,
1148 # especially for PythonTest instances.
1148 # especially for PythonTest instances.
1149 if result.addFailure(self, str(e)):
1149 if result.addFailure(self, str(e)):
1150 success = True
1150 success = True
1151 except Exception:
1151 except Exception:
1152 result.addError(self, sys.exc_info())
1152 result.addError(self, sys.exc_info())
1153 else:
1153 else:
1154 success = True
1154 success = True
1155
1155
1156 try:
1156 try:
1157 self.tearDown()
1157 self.tearDown()
1158 except (KeyboardInterrupt, SystemExit):
1158 except (KeyboardInterrupt, SystemExit):
1159 self._aborted = True
1159 self._aborted = True
1160 raise
1160 raise
1161 except Exception:
1161 except Exception:
1162 result.addError(self, sys.exc_info())
1162 result.addError(self, sys.exc_info())
1163 success = False
1163 success = False
1164
1164
1165 if success:
1165 if success:
1166 result.addSuccess(self)
1166 result.addSuccess(self)
1167 finally:
1167 finally:
1168 result.stopTest(self, interrupted=self._aborted)
1168 result.stopTest(self, interrupted=self._aborted)
1169
1169
1170 def runTest(self):
1170 def runTest(self):
1171 """Run this test instance.
1171 """Run this test instance.
1172
1172
1173 This will return a tuple describing the result of the test.
1173 This will return a tuple describing the result of the test.
1174 """
1174 """
1175 env = self._getenv()
1175 env = self._getenv()
1176 self._genrestoreenv(env)
1176 self._genrestoreenv(env)
1177 self._daemonpids.append(env['DAEMON_PIDS'])
1177 self._daemonpids.append(env['DAEMON_PIDS'])
1178 self._createhgrc(env['HGRCPATH'])
1178 self._createhgrc(env['HGRCPATH'])
1179
1179
1180 vlog('# Test', self.name)
1180 vlog('# Test', self.name)
1181
1181
1182 ret, out = self._run(env)
1182 ret, out = self._run(env)
1183 self._finished = True
1183 self._finished = True
1184 self._ret = ret
1184 self._ret = ret
1185 self._out = out
1185 self._out = out
1186
1186
1187 def describe(ret):
1187 def describe(ret):
1188 if ret < 0:
1188 if ret < 0:
1189 return 'killed by signal: %d' % -ret
1189 return 'killed by signal: %d' % -ret
1190 return 'returned error code %d' % ret
1190 return 'returned error code %d' % ret
1191
1191
1192 self._skipped = False
1192 self._skipped = False
1193
1193
1194 if ret == self.SKIPPED_STATUS:
1194 if ret == self.SKIPPED_STATUS:
1195 if out is None: # Debug mode, nothing to parse.
1195 if out is None: # Debug mode, nothing to parse.
1196 missing = ['unknown']
1196 missing = ['unknown']
1197 failed = None
1197 failed = None
1198 else:
1198 else:
1199 missing, failed = TTest.parsehghaveoutput(out)
1199 missing, failed = TTest.parsehghaveoutput(out)
1200
1200
1201 if not missing:
1201 if not missing:
1202 missing = ['skipped']
1202 missing = ['skipped']
1203
1203
1204 if failed:
1204 if failed:
1205 self.fail('hg have failed checking for %s' % failed[-1])
1205 self.fail('hg have failed checking for %s' % failed[-1])
1206 else:
1206 else:
1207 self._skipped = True
1207 self._skipped = True
1208 raise unittest.SkipTest(missing[-1])
1208 raise unittest.SkipTest(missing[-1])
1209 elif ret == 'timeout':
1209 elif ret == 'timeout':
1210 self.fail('timed out')
1210 self.fail('timed out')
1211 elif ret is False:
1211 elif ret is False:
1212 self.fail('no result code from test')
1212 self.fail('no result code from test')
1213 elif out != self._refout:
1213 elif out != self._refout:
1214 # Diff generation may rely on written .err file.
1214 # Diff generation may rely on written .err file.
1215 if (
1215 if (
1216 (ret != 0 or out != self._refout)
1216 (ret != 0 or out != self._refout)
1217 and not self._skipped
1217 and not self._skipped
1218 and not self._debug
1218 and not self._debug
1219 ):
1219 ):
1220 with open(self.errpath, 'wb') as f:
1220 with open(self.errpath, 'wb') as f:
1221 for line in out:
1221 for line in out:
1222 f.write(line)
1222 f.write(line)
1223
1223
1224 # The result object handles diff calculation for us.
1224 # The result object handles diff calculation for us.
1225 with firstlock:
1225 with firstlock:
1226 if self._result.addOutputMismatch(self, ret, out, self._refout):
1226 if self._result.addOutputMismatch(self, ret, out, self._refout):
1227 # change was accepted, skip failing
1227 # change was accepted, skip failing
1228 return
1228 return
1229 if self._first:
1229 if self._first:
1230 global firsterror
1230 global firsterror
1231 firsterror = True
1231 firsterror = True
1232
1232
1233 if ret:
1233 if ret:
1234 msg = 'output changed and ' + describe(ret)
1234 msg = 'output changed and ' + describe(ret)
1235 else:
1235 else:
1236 msg = 'output changed'
1236 msg = 'output changed'
1237
1237
1238 self.fail(msg)
1238 self.fail(msg)
1239 elif ret:
1239 elif ret:
1240 self.fail(describe(ret))
1240 self.fail(describe(ret))
1241
1241
1242 def tearDown(self):
1242 def tearDown(self):
1243 """Tasks to perform after run()."""
1243 """Tasks to perform after run()."""
1244 for entry in self._daemonpids:
1244 for entry in self._daemonpids:
1245 killdaemons(entry)
1245 killdaemons(entry)
1246 self._daemonpids = []
1246 self._daemonpids = []
1247
1247
1248 if self._keeptmpdir:
1248 if self._keeptmpdir:
1249 log(
1249 log(
1250 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1250 '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
1251 % (
1251 % (
1252 _bytes2sys(self._testtmp),
1252 _bytes2sys(self._testtmp),
1253 _bytes2sys(self._threadtmp),
1253 _bytes2sys(self._threadtmp),
1254 )
1254 )
1255 )
1255 )
1256 else:
1256 else:
1257 try:
1257 try:
1258 shutil.rmtree(self._testtmp)
1258 shutil.rmtree(self._testtmp)
1259 except OSError:
1259 except OSError:
1260 # unreadable directory may be left in $TESTTMP; fix permission
1260 # unreadable directory may be left in $TESTTMP; fix permission
1261 # and try again
1261 # and try again
1262 makecleanable(self._testtmp)
1262 makecleanable(self._testtmp)
1263 shutil.rmtree(self._testtmp, True)
1263 shutil.rmtree(self._testtmp, True)
1264 shutil.rmtree(self._threadtmp, True)
1264 shutil.rmtree(self._threadtmp, True)
1265
1265
1266 if self._usechg:
1266 if self._usechg:
1267 # chgservers will stop automatically after they find the socket
1267 # chgservers will stop automatically after they find the socket
1268 # files are deleted
1268 # files are deleted
1269 shutil.rmtree(self._chgsockdir, True)
1269 shutil.rmtree(self._chgsockdir, True)
1270
1270
1271 if (
1271 if (
1272 (self._ret != 0 or self._out != self._refout)
1272 (self._ret != 0 or self._out != self._refout)
1273 and not self._skipped
1273 and not self._skipped
1274 and not self._debug
1274 and not self._debug
1275 and self._out
1275 and self._out
1276 ):
1276 ):
1277 with open(self.errpath, 'wb') as f:
1277 with open(self.errpath, 'wb') as f:
1278 for line in self._out:
1278 for line in self._out:
1279 f.write(line)
1279 f.write(line)
1280
1280
1281 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1281 vlog("# Ret was:", self._ret, '(%s)' % self.name)
1282
1282
1283 def _run(self, env):
1283 def _run(self, env):
1284 # This should be implemented in child classes to run tests.
1284 # This should be implemented in child classes to run tests.
1285 raise unittest.SkipTest('unknown test type')
1285 raise unittest.SkipTest('unknown test type')
1286
1286
1287 def abort(self):
1287 def abort(self):
1288 """Terminate execution of this test."""
1288 """Terminate execution of this test."""
1289 self._aborted = True
1289 self._aborted = True
1290
1290
1291 def _portmap(self, i):
1291 def _portmap(self, i):
1292 offset = b'' if i == 0 else b'%d' % i
1292 offset = b'' if i == 0 else b'%d' % i
1293 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1293 return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
1294
1294
1295 def _getreplacements(self):
1295 def _getreplacements(self):
1296 """Obtain a mapping of text replacements to apply to test output.
1296 """Obtain a mapping of text replacements to apply to test output.
1297
1297
1298 Test output needs to be normalized so it can be compared to expected
1298 Test output needs to be normalized so it can be compared to expected
1299 output. This function defines how some of that normalization will
1299 output. This function defines how some of that normalization will
1300 occur.
1300 occur.
1301 """
1301 """
1302 r = [
1302 r = [
1303 # This list should be parallel to defineport in _getenv
1303 # This list should be parallel to defineport in _getenv
1304 self._portmap(0),
1304 self._portmap(0),
1305 self._portmap(1),
1305 self._portmap(1),
1306 self._portmap(2),
1306 self._portmap(2),
1307 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1307 (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
1308 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1308 (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
1309 ]
1309 ]
1310 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1310 r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
1311
1311
1312 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1312 replacementfile = os.path.join(self._testdir, b'common-pattern.py')
1313
1313
1314 if os.path.exists(replacementfile):
1314 if os.path.exists(replacementfile):
1315 data = {}
1315 data = {}
1316 with open(replacementfile, mode='rb') as source:
1316 with open(replacementfile, mode='rb') as source:
1317 # the intermediate 'compile' step help with debugging
1317 # the intermediate 'compile' step help with debugging
1318 code = compile(source.read(), replacementfile, 'exec')
1318 code = compile(source.read(), replacementfile, 'exec')
1319 exec(code, data)
1319 exec(code, data)
1320 for value in data.get('substitutions', ()):
1320 for value in data.get('substitutions', ()):
1321 if len(value) != 2:
1321 if len(value) != 2:
1322 msg = 'malformatted substitution in %s: %r'
1322 msg = 'malformatted substitution in %s: %r'
1323 msg %= (replacementfile, value)
1323 msg %= (replacementfile, value)
1324 raise ValueError(msg)
1324 raise ValueError(msg)
1325 r.append(value)
1325 r.append(value)
1326 return r
1326 return r
1327
1327
1328 def _escapepath(self, p):
1328 def _escapepath(self, p):
1329 if os.name == 'nt':
1329 if os.name == 'nt':
1330 return b''.join(
1330 return b''.join(
1331 c.isalpha()
1331 c.isalpha()
1332 and b'[%s%s]' % (c.lower(), c.upper())
1332 and b'[%s%s]' % (c.lower(), c.upper())
1333 or c in b'/\\'
1333 or c in b'/\\'
1334 and br'[/\\]'
1334 and br'[/\\]'
1335 or c.isdigit()
1335 or c.isdigit()
1336 and c
1336 and c
1337 or b'\\' + c
1337 or b'\\' + c
1338 for c in [p[i : i + 1] for i in range(len(p))]
1338 for c in [p[i : i + 1] for i in range(len(p))]
1339 )
1339 )
1340 else:
1340 else:
1341 return re.escape(p)
1341 return re.escape(p)
1342
1342
1343 def _localip(self):
1343 def _localip(self):
1344 if self._useipv6:
1344 if self._useipv6:
1345 return b'::1'
1345 return b'::1'
1346 else:
1346 else:
1347 return b'127.0.0.1'
1347 return b'127.0.0.1'
1348
1348
1349 def _genrestoreenv(self, testenv):
1349 def _genrestoreenv(self, testenv):
1350 """Generate a script that can be used by tests to restore the original
1350 """Generate a script that can be used by tests to restore the original
1351 environment."""
1351 environment."""
1352 # Put the restoreenv script inside self._threadtmp
1352 # Put the restoreenv script inside self._threadtmp
1353 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1353 scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
1354 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1354 testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)
1355
1355
1356 # Only restore environment variable names that the shell allows
1356 # Only restore environment variable names that the shell allows
1357 # us to export.
1357 # us to export.
1358 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1358 name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')
1359
1359
1360 # Do not restore these variables; otherwise tests would fail.
1360 # Do not restore these variables; otherwise tests would fail.
1361 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1361 reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}
1362
1362
1363 with open(scriptpath, 'w') as envf:
1363 with open(scriptpath, 'w') as envf:
1364 for name, value in origenviron.items():
1364 for name, value in origenviron.items():
1365 if not name_regex.match(name):
1365 if not name_regex.match(name):
1366 # Skip environment variables with unusual names not
1366 # Skip environment variables with unusual names not
1367 # allowed by most shells.
1367 # allowed by most shells.
1368 continue
1368 continue
1369 if name in reqnames:
1369 if name in reqnames:
1370 continue
1370 continue
1371 envf.write('%s=%s\n' % (name, shellquote(value)))
1371 envf.write('%s=%s\n' % (name, shellquote(value)))
1372
1372
1373 for name in testenv:
1373 for name in testenv:
1374 if name in origenviron or name in reqnames:
1374 if name in origenviron or name in reqnames:
1375 continue
1375 continue
1376 envf.write('unset %s\n' % (name,))
1376 envf.write('unset %s\n' % (name,))
1377
1377
1378 def _getenv(self):
1378 def _getenv(self):
1379 """Obtain environment variables to use during test execution."""
1379 """Obtain environment variables to use during test execution."""
1380
1380
1381 def defineport(i):
1381 def defineport(i):
1382 offset = '' if i == 0 else '%s' % i
1382 offset = '' if i == 0 else '%s' % i
1383 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1383 env["HGPORT%s" % offset] = '%s' % (self._startport + i)
1384
1384
1385 env = os.environ.copy()
1385 env = os.environ.copy()
1386 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1386 env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
1387 env['HGEMITWARNINGS'] = '1'
1387 env['HGEMITWARNINGS'] = '1'
1388 env['TESTTMP'] = _bytes2sys(self._testtmp)
1388 env['TESTTMP'] = _bytes2sys(self._testtmp)
1389 docket_id_file = os.path.join(_bytes2sys(self._testtmp), 'DOCKETID')
1390 env['HGTEST_DOCKETIDFILE'] = docket_id_file
1389 env['TESTNAME'] = self.name
1391 env['TESTNAME'] = self.name
1390 env['HOME'] = _bytes2sys(self._testtmp)
1392 env['HOME'] = _bytes2sys(self._testtmp)
1391 if os.name == 'nt':
1393 if os.name == 'nt':
1392 env['REALUSERPROFILE'] = env['USERPROFILE']
1394 env['REALUSERPROFILE'] = env['USERPROFILE']
1393 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1395 # py3.8+ ignores HOME: https://bugs.python.org/issue36264
1394 env['USERPROFILE'] = env['HOME']
1396 env['USERPROFILE'] = env['HOME']
1395 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1397 formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
1396 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1398 env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
1397 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1399 env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
1398 # This number should match portneeded in _getport
1400 # This number should match portneeded in _getport
1399 for port in xrange(3):
1401 for port in xrange(3):
1400 # This list should be parallel to _portmap in _getreplacements
1402 # This list should be parallel to _portmap in _getreplacements
1401 defineport(port)
1403 defineport(port)
1402 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1404 env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
1403 env["DAEMON_PIDS"] = _bytes2sys(
1405 env["DAEMON_PIDS"] = _bytes2sys(
1404 os.path.join(self._threadtmp, b'daemon.pids')
1406 os.path.join(self._threadtmp, b'daemon.pids')
1405 )
1407 )
1406 env["HGEDITOR"] = (
1408 env["HGEDITOR"] = (
1407 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1409 '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
1408 )
1410 )
1409 env["HGUSER"] = "test"
1411 env["HGUSER"] = "test"
1410 env["HGENCODING"] = "ascii"
1412 env["HGENCODING"] = "ascii"
1411 env["HGENCODINGMODE"] = "strict"
1413 env["HGENCODINGMODE"] = "strict"
1412 env["HGHOSTNAME"] = "test-hostname"
1414 env["HGHOSTNAME"] = "test-hostname"
1413 env['HGIPV6'] = str(int(self._useipv6))
1415 env['HGIPV6'] = str(int(self._useipv6))
1414 # See contrib/catapipe.py for how to use this functionality.
1416 # See contrib/catapipe.py for how to use this functionality.
1415 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1417 if 'HGTESTCATAPULTSERVERPIPE' not in env:
1416 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1418 # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
1417 # non-test one in as a default, otherwise set to devnull
1419 # non-test one in as a default, otherwise set to devnull
1418 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1420 env['HGTESTCATAPULTSERVERPIPE'] = env.get(
1419 'HGCATAPULTSERVERPIPE', os.devnull
1421 'HGCATAPULTSERVERPIPE', os.devnull
1420 )
1422 )
1421
1423
1422 extraextensions = []
1424 extraextensions = []
1423 for opt in self._extraconfigopts:
1425 for opt in self._extraconfigopts:
1424 section, key = opt.split('.', 1)
1426 section, key = opt.split('.', 1)
1425 if section != 'extensions':
1427 if section != 'extensions':
1426 continue
1428 continue
1427 name = key.split('=', 1)[0]
1429 name = key.split('=', 1)[0]
1428 extraextensions.append(name)
1430 extraextensions.append(name)
1429
1431
1430 if extraextensions:
1432 if extraextensions:
1431 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1433 env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)
1432
1434
1433 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1435 # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
1434 # IP addresses.
1436 # IP addresses.
1435 env['LOCALIP'] = _bytes2sys(self._localip())
1437 env['LOCALIP'] = _bytes2sys(self._localip())
1436
1438
1437 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1439 # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
1438 # but this is needed for testing python instances like dummyssh,
1440 # but this is needed for testing python instances like dummyssh,
1439 # dummysmtpd.py, and dumbhttp.py.
1441 # dummysmtpd.py, and dumbhttp.py.
1440 if PYTHON3 and os.name == 'nt':
1442 if PYTHON3 and os.name == 'nt':
1441 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1443 env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
1442
1444
1443 # Modified HOME in test environment can confuse Rust tools. So set
1445 # Modified HOME in test environment can confuse Rust tools. So set
1444 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1446 # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
1445 # present and these variables aren't already defined.
1447 # present and these variables aren't already defined.
1446 cargo_home_path = os.path.expanduser('~/.cargo')
1448 cargo_home_path = os.path.expanduser('~/.cargo')
1447 rustup_home_path = os.path.expanduser('~/.rustup')
1449 rustup_home_path = os.path.expanduser('~/.rustup')
1448
1450
1449 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1451 if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
1450 env['CARGO_HOME'] = cargo_home_path
1452 env['CARGO_HOME'] = cargo_home_path
1451 if (
1453 if (
1452 os.path.exists(rustup_home_path)
1454 os.path.exists(rustup_home_path)
1453 and b'RUSTUP_HOME' not in osenvironb
1455 and b'RUSTUP_HOME' not in osenvironb
1454 ):
1456 ):
1455 env['RUSTUP_HOME'] = rustup_home_path
1457 env['RUSTUP_HOME'] = rustup_home_path
1456
1458
1457 # Reset some environment variables to well-known values so that
1459 # Reset some environment variables to well-known values so that
1458 # the tests produce repeatable output.
1460 # the tests produce repeatable output.
1459 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1461 env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
1460 env['TZ'] = 'GMT'
1462 env['TZ'] = 'GMT'
1461 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1463 env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
1462 env['COLUMNS'] = '80'
1464 env['COLUMNS'] = '80'
1463 env['TERM'] = 'xterm'
1465 env['TERM'] = 'xterm'
1464
1466
1465 dropped = [
1467 dropped = [
1466 'CDPATH',
1468 'CDPATH',
1467 'CHGDEBUG',
1469 'CHGDEBUG',
1468 'EDITOR',
1470 'EDITOR',
1469 'GREP_OPTIONS',
1471 'GREP_OPTIONS',
1470 'HG',
1472 'HG',
1471 'HGMERGE',
1473 'HGMERGE',
1472 'HGPLAIN',
1474 'HGPLAIN',
1473 'HGPLAINEXCEPT',
1475 'HGPLAINEXCEPT',
1474 'HGPROF',
1476 'HGPROF',
1475 'http_proxy',
1477 'http_proxy',
1476 'no_proxy',
1478 'no_proxy',
1477 'NO_PROXY',
1479 'NO_PROXY',
1478 'PAGER',
1480 'PAGER',
1479 'VISUAL',
1481 'VISUAL',
1480 ]
1482 ]
1481
1483
1482 for k in dropped:
1484 for k in dropped:
1483 if k in env:
1485 if k in env:
1484 del env[k]
1486 del env[k]
1485
1487
1486 # unset env related to hooks
1488 # unset env related to hooks
1487 for k in list(env):
1489 for k in list(env):
1488 if k.startswith('HG_'):
1490 if k.startswith('HG_'):
1489 del env[k]
1491 del env[k]
1490
1492
1491 if self._usechg:
1493 if self._usechg:
1492 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1494 env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
1493 if self._chgdebug:
1495 if self._chgdebug:
1494 env['CHGDEBUG'] = 'true'
1496 env['CHGDEBUG'] = 'true'
1495
1497
1496 return env
1498 return env
1497
1499
1498 def _createhgrc(self, path):
1500 def _createhgrc(self, path):
1499 """Create an hgrc file for this test."""
1501 """Create an hgrc file for this test."""
1500 with open(path, 'wb') as hgrc:
1502 with open(path, 'wb') as hgrc:
1501 hgrc.write(b'[ui]\n')
1503 hgrc.write(b'[ui]\n')
1502 hgrc.write(b'slash = True\n')
1504 hgrc.write(b'slash = True\n')
1503 hgrc.write(b'interactive = False\n')
1505 hgrc.write(b'interactive = False\n')
1504 hgrc.write(b'detailed-exit-code = True\n')
1506 hgrc.write(b'detailed-exit-code = True\n')
1505 hgrc.write(b'merge = internal:merge\n')
1507 hgrc.write(b'merge = internal:merge\n')
1506 hgrc.write(b'mergemarkers = detailed\n')
1508 hgrc.write(b'mergemarkers = detailed\n')
1507 hgrc.write(b'promptecho = True\n')
1509 hgrc.write(b'promptecho = True\n')
1508 hgrc.write(b'timeout.warn=15\n')
1510 hgrc.write(b'timeout.warn=15\n')
1509 hgrc.write(b'[defaults]\n')
1511 hgrc.write(b'[defaults]\n')
1510 hgrc.write(b'[devel]\n')
1512 hgrc.write(b'[devel]\n')
1511 hgrc.write(b'all-warnings = true\n')
1513 hgrc.write(b'all-warnings = true\n')
1512 hgrc.write(b'default-date = 0 0\n')
1514 hgrc.write(b'default-date = 0 0\n')
1513 hgrc.write(b'[largefiles]\n')
1515 hgrc.write(b'[largefiles]\n')
1514 hgrc.write(
1516 hgrc.write(
1515 b'usercache = %s\n'
1517 b'usercache = %s\n'
1516 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1518 % (os.path.join(self._testtmp, b'.cache/largefiles'))
1517 )
1519 )
1518 hgrc.write(b'[lfs]\n')
1520 hgrc.write(b'[lfs]\n')
1519 hgrc.write(
1521 hgrc.write(
1520 b'usercache = %s\n'
1522 b'usercache = %s\n'
1521 % (os.path.join(self._testtmp, b'.cache/lfs'))
1523 % (os.path.join(self._testtmp, b'.cache/lfs'))
1522 )
1524 )
1523 hgrc.write(b'[web]\n')
1525 hgrc.write(b'[web]\n')
1524 hgrc.write(b'address = localhost\n')
1526 hgrc.write(b'address = localhost\n')
1525 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1527 hgrc.write(b'ipv6 = %r\n' % self._useipv6)
1526 hgrc.write(b'server-header = testing stub value\n')
1528 hgrc.write(b'server-header = testing stub value\n')
1527
1529
1528 for opt in self._extraconfigopts:
1530 for opt in self._extraconfigopts:
1529 section, key = _sys2bytes(opt).split(b'.', 1)
1531 section, key = _sys2bytes(opt).split(b'.', 1)
1530 assert b'=' in key, (
1532 assert b'=' in key, (
1531 'extra config opt %s must ' 'have an = for assignment' % opt
1533 'extra config opt %s must ' 'have an = for assignment' % opt
1532 )
1534 )
1533 hgrc.write(b'[%s]\n%s\n' % (section, key))
1535 hgrc.write(b'[%s]\n%s\n' % (section, key))
1534
1536
1535 def fail(self, msg):
1537 def fail(self, msg):
1536 # unittest differentiates between errored and failed.
1538 # unittest differentiates between errored and failed.
1537 # Failed is denoted by AssertionError (by default at least).
1539 # Failed is denoted by AssertionError (by default at least).
1538 raise AssertionError(msg)
1540 raise AssertionError(msg)
1539
1541
    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            # In --debug mode the child inherits our stdio so the user can
            # watch the test run live; nothing is captured.
            proc = subprocess.Popen(
                _bytes2sys(cmd),
                shell=True,
                cwd=_bytes2sys(self._testtmp),
                env=env,
            )
            ret = proc.wait()
            return (ret, None)

        # Popen4 is a project helper that merges stderr into stdout and
        # enforces the per-test timeout.
        proc = Popen4(cmd, self._testtmp, self._timeout, env)

        def cleanup():
            # Kill the child and any daemons it spawned; report SIGTERM
            # in wait()-style encoding if the child claimed success.
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        # Convert a wait()-style status into a plain exit code when the
        # child exited normally.
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        # Any failure may leave test daemons running; reap them.
        if ret:
            killdaemons(env['DAEMON_PIDS'])

        # Apply the environment-dependent output substitutions (paths,
        # ports, ...) so the output is comparable to the reference.
        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)
1592
1594
1593
1595
class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        # The expected output lives next to the test in a ``.out`` file.
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Quote the python(3) executable for Windows
        cmd = b'"%s" "%s"' % (PYTHON, self.path)
        vlog("# Running", cmd.decode("utf-8"))
        # Windows children emit \r\n; normalize so the output matches
        # reference files recorded on POSIX platforms.
        normalizenewlines = os.name == 'nt'
        result = self._runcommand(cmd, env, normalizenewlines=normalizenewlines)
        if self._aborted:
            raise KeyboardInterrupt()

        return result
1611
1613
1612
1614
# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

# bchr(i) returns the single byte with ordinal i on both Python 2 and 3
# (py3 bytes indexing yields ints, hence the wrapper).
bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])

# Tri-state: should an output mismatch merely warn instead of fail?
WARN_UNDEFINED = 1  # not decided yet
WARN_YES = 2  # yes, warn only
WARN_NO = 3  # no, definitely fail

# Suffix marking an expected-output line as optional in a .t file.
MARK_OPTIONAL = b" (?)\n"


def isoptional(line):
    # True when *line* carries the optional-output marker.
    return line.endswith(MARK_OPTIONAL)
1640
1642
1641
1643
1642 class TTest(Test):
1644 class TTest(Test):
1643 """A "t test" is a test backed by a .t file."""
1645 """A "t test" is a test backed by a .t file."""
1644
1646
    # Marker prefixes emitted by the test script for skip/failure reasons.
    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    # Detects bytes that need escaping before being written back into a .t
    # file (control chars and non-ASCII).
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    # Substitution machinery used to escape such bytes as \xNN sequences.
    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
    # Backslash and carriage return get readable escapes instead of \xNN.
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})
1652
1654
    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        # Every case name declared by any '#testcases' line in the file.
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            # Make the display name, .err path and temp dir unique for
            # this particular test-case combination.
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
        # Cache of hghave results, keyed by the joined requirement string.
        self._have = {}
1666 @property
1668 @property
1667 def refpath(self):
1669 def refpath(self):
1668 return os.path.join(self._testdir, self.bname)
1670 return os.path.join(self._testdir, self.bname)
1669
1671
1670 def _run(self, env):
1672 def _run(self, env):
1671 with open(self.path, 'rb') as f:
1673 with open(self.path, 'rb') as f:
1672 lines = f.readlines()
1674 lines = f.readlines()
1673
1675
1674 # .t file is both reference output and the test input, keep reference
1676 # .t file is both reference output and the test input, keep reference
1675 # output updated with the the test input. This avoids some race
1677 # output updated with the the test input. This avoids some race
1676 # conditions where the reference output does not match the actual test.
1678 # conditions where the reference output does not match the actual test.
1677 if self._refout is not None:
1679 if self._refout is not None:
1678 self._refout = lines
1680 self._refout = lines
1679
1681
1680 salt, script, after, expected = self._parsetest(lines)
1682 salt, script, after, expected = self._parsetest(lines)
1681
1683
1682 # Write out the generated script.
1684 # Write out the generated script.
1683 fname = b'%s.sh' % self._testtmp
1685 fname = b'%s.sh' % self._testtmp
1684 with open(fname, 'wb') as f:
1686 with open(fname, 'wb') as f:
1685 for l in script:
1687 for l in script:
1686 f.write(l)
1688 f.write(l)
1687
1689
1688 cmd = b'%s "%s"' % (self._shell, fname)
1690 cmd = b'%s "%s"' % (self._shell, fname)
1689 vlog("# Running", cmd.decode("utf-8"))
1691 vlog("# Running", cmd.decode("utf-8"))
1690
1692
1691 exitcode, output = self._runcommand(cmd, env)
1693 exitcode, output = self._runcommand(cmd, env)
1692
1694
1693 if self._aborted:
1695 if self._aborted:
1694 raise KeyboardInterrupt()
1696 raise KeyboardInterrupt()
1695
1697
1696 # Do not merge output if skipped. Return hghave message instead.
1698 # Do not merge output if skipped. Return hghave message instead.
1697 # Similarly, with --debug, output is None.
1699 # Similarly, with --debug, output is None.
1698 if exitcode == self.SKIPPED_STATUS or output is None:
1700 if exitcode == self.SKIPPED_STATUS or output is None:
1699 return exitcode, output
1701 return exitcode, output
1700
1702
1701 return self._processoutput(exitcode, output, salt, after, expected)
1703 return self._processoutput(exitcode, output, salt, after, expected)
1702
1704
    def _hghave(self, reqs):
        """Check feature requirements via the external ``hghave`` script.

        Returns a ``(available, message)`` pair; results are cached per
        joined requirement string in ``self._have``.
        """
        allreqs = b' '.join(reqs)

        self._detectslow(reqs)

        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = osenvironb[b'RUNTESTDIR']
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(
            b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
            self._testtmp,
            0,
            self._getenv(),
        )
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        # Exit code 2 means hghave itself is broken (e.g. unknown
        # feature name); abort the whole runner, not just this test.
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        self._have[allreqs] = (True, None)
        return True, None
1734
1736
1735 def _detectslow(self, reqs):
1737 def _detectslow(self, reqs):
1736 """update the timeout of slow test when appropriate"""
1738 """update the timeout of slow test when appropriate"""
1737 if b'slow' in reqs:
1739 if b'slow' in reqs:
1738 self._timeout = self._slowtimeout
1740 self._timeout = self._slowtimeout
1739
1741
1740 def _iftest(self, args):
1742 def _iftest(self, args):
1741 # implements "#if"
1743 # implements "#if"
1742 reqs = []
1744 reqs = []
1743 for arg in args:
1745 for arg in args:
1744 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1746 if arg.startswith(b'no-') and arg[3:] in self._allcases:
1745 if arg[3:] in self._case:
1747 if arg[3:] in self._case:
1746 return False
1748 return False
1747 elif arg in self._allcases:
1749 elif arg in self._allcases:
1748 if arg not in self._case:
1750 if arg not in self._case:
1749 return False
1751 return False
1750 else:
1752 else:
1751 reqs.append(arg)
1753 reqs.append(arg)
1752 self._detectslow(reqs)
1754 self._detectslow(reqs)
1753 return self._hghave(reqs)[0]
1755 return self._hghave(reqs)[0]
1754
1756
    def _parsetest(self, lines):
        """Translate the .t source *lines* into a runnable shell script.

        Returns ``(salt, script, after, expected)`` where *salt* is the
        unique marker string echoed between commands, *script* is the list
        of shell lines to execute, *after* maps source positions to the
        non-executable text to re-merge, and *expected* maps positions to
        the expected output lines.
        """
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()

        def addsalt(line, inpython):
            # Emit a synchronization marker; inside a Python heredoc we
            # cannot run `echo`, so the marker is written literally.
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))

        activetrace = []
        session = str(uuid.uuid4())
        if PYTHON3:
            session = session.encode('ascii')
        hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
            'HGCATAPULTSERVERPIPE'
        )

        def toggletrace(cmd=None):
            # End the current catapult trace span (if any) and, when *cmd*
            # is given, start a new one named after the command.
            if not hgcatapult or hgcatapult == os.devnull:
                return

            if activetrace:
                script.append(
                    b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                    % (session, activetrace[0])
                )
            if cmd is None:
                return

            if isinstance(cmd, str):
                quoted = shellquote(cmd.strip())
            else:
                quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
            quoted = quoted.replace(b'\\', b'\\\\')
            script.append(
                b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
                % (session, quoted)
            )
            activetrace[0:] = [quoted]

        script = []

        # After we run the shell script, we re-unify the script output
        # with non-active parts of the source, with synchronization by our
        # SALT line number markers. The after table contains the non-active
        # components, ordered by line number.
        after = {}

        # Expected shell script output.
        expected = {}

        pos = prepos = -1

        # True or False when in a true or false conditional section
        skipping = None

        # We keep track of whether or not we're in a Python block so we
        # can generate the surrounding doctest magic.
        inpython = False

        if self._debug:
            script.append(b'set -x\n')
        if self._hgcommand != b'hg':
            script.append(b'alias hg="%s"\n' % self._hgcommand)
        if os.getenv('MSYSTEM'):
            script.append(b'alias pwd="pwd -W"\n')

        if hgcatapult and hgcatapult != os.devnull:
            if PYTHON3:
                hgcatapult = hgcatapult.encode('utf8')
                cataname = self.name.encode('utf8')
            else:
                cataname = self.name

            # Kludge: use a while loop to keep the pipe from getting
            # closed by our echo commands. The still-running file gets
            # reaped at the end of the script, which causes the while
            # loop to exit and closes the pipe. Sigh.
            script.append(
                b'rtendtracing() {\n'
                b'  echo END %(session)s %(name)s >> %(catapult)s\n'
                b'  rm -f "$TESTTMP/.still-running"\n'
                b'}\n'
                b'trap "rtendtracing" 0\n'
                b'touch "$TESTTMP/.still-running"\n'
                b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
                b'> %(catapult)s &\n'
                b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
                b'echo START %(session)s %(name)s >> %(catapult)s\n'
                % {
                    b'name': cataname,
                    b'session': session,
                    b'catapult': hgcatapult,
                }
            )

        if self._case:
            # Export the selected test case so the script can branch on it.
            casestr = b'#'.join(self._case)
            if isinstance(casestr, str):
                quoted = shellquote(casestr)
            else:
                quoted = shellquote(casestr.decode('utf8')).encode('utf8')
            script.append(b'TESTCASE=%s\n' % quoted)
            script.append(b'export TESTCASE\n')

        n = 0
        for n, l in enumerate(lines):
            if not l.endswith(b'\n'):
                l += b'\n'
            if l.startswith(b'#require'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#require':
                    after.setdefault(pos, []).append(
                        b'  !!! invalid #require\n'
                    )
                if not skipping:
                    haveresult, message = self._hghave(lsplit[1:])
                    if not haveresult:
                        # Requirement missing: replace the whole script
                        # with an exit-80 (skip) stub.
                        script = [b'echo "%s"\nexit 80\n' % message]
                        break
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#if'):
                lsplit = l.split()
                if len(lsplit) < 2 or lsplit[0] != b'#if':
                    after.setdefault(pos, []).append(b'  !!! invalid #if\n')
                if skipping is not None:
                    after.setdefault(pos, []).append(b'  !!! nested #if\n')
                skipping = not self._iftest(lsplit[1:])
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#else'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = not skipping
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'#endif'):
                if skipping is None:
                    after.setdefault(pos, []).append(b'  !!! missing #if\n')
                skipping = None
                after.setdefault(pos, []).append(l)
            elif skipping:
                after.setdefault(pos, []).append(l)
            elif l.startswith(b'  >>> '):  # python inlines
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                if not inpython:
                    # We've just entered a Python block. Add the header.
                    inpython = True
                    addsalt(prepos, False)  # Make sure we report the exit code.
                    script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
                addsalt(n, True)
                script.append(l[2:])
            elif l.startswith(b'  ... '):  # python inlines
                after.setdefault(prepos, []).append(l)
                script.append(l[2:])
            elif l.startswith(b'  $ '):  # commands
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                after.setdefault(pos, []).append(l)
                prepos = pos
                pos = n
                addsalt(n, False)
                rawcmd = l[4:]
                cmd = rawcmd.split()
                toggletrace(rawcmd)
                # Make a failing `cd` abort the script so later commands
                # don't run in the wrong directory.
                if len(cmd) == 2 and cmd[0] == b'cd':
                    rawcmd = b'cd %s || exit 1\n' % cmd[1]
                script.append(rawcmd)
            elif l.startswith(b'  > '):  # continuations
                after.setdefault(prepos, []).append(l)
                script.append(l[4:])
            elif l.startswith(b'  '):  # results
                # Queue up a list of expected results.
                expected.setdefault(pos, []).append(l[2:])
            else:
                if inpython:
                    script.append(b'EOF\n')
                    inpython = False
                # Non-command/result. Queue up for merged output.
                after.setdefault(pos, []).append(l)

        if inpython:
            script.append(b'EOF\n')
        if skipping is not None:
            after.setdefault(pos, []).append(b'  !!! missing #endif\n')
        addsalt(n + 1, False)
        # Need to end any current per-command trace
        if activetrace:
            toggletrace()
        return salt, script, after, expected
1949
1951
    def _processoutput(self, exitcode, output, salt, after, expected):
        """Merge the script output back into a unified test.

        Splits each raw output line on *salt* into output and command
        markers, delegates comparison to ``_process_out_line`` /
        ``_process_cmd_line``, and re-attaches the non-executable source
        text recorded in *after*.
        """
        # Merge the script output back into a unified test.
        warnonly = WARN_UNDEFINED  # 1: not yet; 2: yes; 3: for sure not
        if exitcode != 0:
            # A non-zero exit means a real failure: never downgrade
            # mismatches to warnings.
            warnonly = WARN_NO

        pos = -1
        postout = []
        for out_rawline in output:
            out_line, cmd_line = out_rawline, None
            if salt in out_rawline:
                out_line, cmd_line = out_rawline.split(salt, 1)

            pos, postout, warnonly = self._process_out_line(
                out_line, pos, postout, expected, warnonly
            )
            pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)

        if pos in after:
            postout += after.pop(pos)

        if warnonly == WARN_YES:
            exitcode = False  # Set exitcode to warned.

        return exitcode, postout
1975
1977
def _process_out_line(self, out_line, pos, postout, expected, warnonly):
    # Compare one actual output line against the expectations recorded for
    # command position ``pos`` and return the updated
    # ``(pos, postout, warnonly)`` triple.
    #
    # NOTE(review): the ``while``/``break`` shape exists so the trailing
    # ``else`` clause can handle the empty ``out_line`` case; the body runs
    # once per call except when linematch asks for a "retry".
    while out_line:
        if not out_line.endswith(b'\n'):
            out_line += b' (no-eol)\n'

        # Find the expected output at the current position.
        els = [None]
        if expected.get(pos, None):
            els = expected[pos]

        optional = []
        for i, el in enumerate(els):
            r = False
            if el:
                r, exact = self.linematch(el, out_line)
                if isinstance(r, str):
                    # String results are control signals from linematch.
                    if r == '-glob':
                        out_line = ''.join(el.rsplit(' (glob)', 1))
                        r = ''  # Warn only this line.
                    elif r == "retry":
                        postout.append(b' ' + el)
                    else:
                        log('\ninfo, unknown linematch result: %r\n' % r)
                        r = False
            if r:
                # Matched: consume this expectation.
                els.pop(i)
                break
            if el:
                # Track expectations that are allowed to be absent
                # (explicitly optional, or conditional on a feature that
                # is not available in this run).
                if isoptional(el):
                    optional.append(i)
                else:
                    m = optline.match(el)
                    if m:
                        conditions = [c for c in m.group(2).split(b' ')]

                        if not self._iftest(conditions):
                            optional.append(i)
                if exact:
                    # Don't allow line to be matches against a later
                    # line in the output
                    els.pop(i)
                    break

        if r:
            if r == "retry":
                continue
            # clean up any optional leftovers
            for i in optional:
                postout.append(b' ' + els[i])
            for i in reversed(optional):
                del els[i]
            postout.append(b' ' + el)
        else:
            if self.NEEDESCAPE(out_line):
                out_line = TTest._stringescape(
                    b'%s (esc)\n' % out_line.rstrip(b'\n')
                )
            postout.append(b' ' + out_line)  # Let diff deal with it.
            if r != '':  # If line failed.
                warnonly = WARN_NO
            elif warnonly == WARN_UNDEFINED:
                warnonly = WARN_YES
        break
    else:
        # clean up any optional leftovers
        while expected.get(pos, None):
            el = expected[pos].pop(0)
            if el:
                if not isoptional(el):
                    m = optline.match(el)
                    if m:
                        conditions = [c for c in m.group(2).split(b' ')]

                        if self._iftest(conditions):
                            # Don't append as optional line
                            continue
                    else:
                        continue
            postout.append(b' ' + el)
    return pos, postout, warnonly
2056
2058
2057 def _process_cmd_line(self, cmd_line, pos, postout, after):
2059 def _process_cmd_line(self, cmd_line, pos, postout, after):
2058 """process a "command" part of a line from unified test output"""
2060 """process a "command" part of a line from unified test output"""
2059 if cmd_line:
2061 if cmd_line:
2060 # Add on last return code.
2062 # Add on last return code.
2061 ret = int(cmd_line.split()[1])
2063 ret = int(cmd_line.split()[1])
2062 if ret != 0:
2064 if ret != 0:
2063 postout.append(b' [%d]\n' % ret)
2065 postout.append(b' [%d]\n' % ret)
2064 if pos in after:
2066 if pos in after:
2065 # Merge in non-active test bits.
2067 # Merge in non-active test bits.
2066 postout += after.pop(pos)
2068 postout += after.pop(pos)
2067 pos = int(cmd_line.split()[0])
2069 pos = int(cmd_line.split()[0])
2068 return pos, postout
2070 return pos, postout
2069
2071
@staticmethod
def rematch(el, l):
    """Match expected line ``el`` as a regular expression against output
    line ``l``.

    Returns the match object on success, ``None`` on a non-match, and
    ``False`` when ``el`` is not a valid regular expression.
    """
    try:
        # parse any flags at the beginning of the regex. Only 'i' is
        # supported right now, but this should be easy to extend.
        head = re.match(br'^(\(\?i\))?(.*)', el)
        flags = head.group(1) or b''
        pattern = flags + b'(?:' + head.group(2) + b')'
        # use \Z to ensure that the regex matches to the end of the string
        if os.name == 'nt':
            # tolerate Windows line endings in the actual output
            return re.match(pattern + br'\r?\n\Z', l)
        return re.match(pattern + br'\n\Z', l)
    except re.error:
        # el is an invalid regex
        return False
2085
2087
@staticmethod
def globmatch(el, l):
    """Match expected line ``el`` (glob syntax) against output line ``l``.

    The only supported special characters are ``*`` and ``?``, plus ``/``
    which also matches ``\\`` on Windows; escaping of these characters is
    supported.  Returns ``b'-glob'`` when the line matched literally but
    carried a superfluous glob marker.
    """
    if el + b'\n' == l:
        if os.altsep:
            # matching on "/" is not needed for this line
            for pat in checkcodeglobpats:
                if pat.match(el):
                    return True
            return b'-glob'
        return True
    el = el.replace(b'$LOCALIP', b'*')
    # Translate the glob into a regular expression, one character at a
    # time, then delegate to rematch().
    pieces = []
    idx, end = 0, len(el)
    while idx < end:
        ch = el[idx : idx + 1]
        idx += 1
        if ch == b'\\' and idx < end and el[idx : idx + 1] in b'*?\\/':
            # escaped special character: keep it verbatim
            pieces.append(el[idx - 1 : idx + 1])
            idx += 1
        elif ch == b'*':
            pieces.append(b'.*')
        elif ch == b'?':
            pieces.append(b'.')
        elif ch == b'/' and os.altsep:
            pieces.append(b'[/\\\\]')
        else:
            pieces.append(re.escape(ch))
    return TTest.rematch(b''.join(pieces), l)
2116
2118
def linematch(self, el, l):
    """Match expected line ``el`` against output line ``l``.

    Returns a 2-tuple ``(matched, exact)``.  ``matched`` may be True, a
    falsy value, or the string ``"retry"`` for optional/conditional
    expectations that the caller should reconsider.  ``exact`` signals
    that the expectation may only match at this exact position (see the
    caller's handling in _process_out_line).
    """
    if el == l:  # perfect match (fast)
        return True, True
    retry = False
    if isoptional(el):
        # optional line: strip the marker, remember to report "retry"
        retry = "retry"
        el = el[: -len(MARK_OPTIONAL)] + b"\n"
    else:
        m = optline.match(el)
        if m:
            conditions = [c for c in m.group(2).split(b' ')]

            el = m.group(1) + b"\n"
            if not self._iftest(conditions):
                # listed feature missing, should not match
                return "retry", False

    if el.endswith(b" (esc)\n"):
        # decode escape sequences in the expectation before comparing
        if PYTHON3:
            el = el[:-7].decode('unicode_escape') + '\n'
            el = el.encode('latin-1')
        else:
            el = el[:-7].decode('string-escape') + '\n'
    if el == l or os.name == 'nt' and el[:-1] + b'\r\n' == l:
        return True, True
    if el.endswith(b" (re)\n"):
        return (TTest.rematch(el[:-6], l) or retry), False
    if el.endswith(b" (glob)\n"):
        # ignore '(glob)' added to l by 'replacements'
        if l.endswith(b" (glob)\n"):
            l = l[:-8] + b"\n"
        return (TTest.globmatch(el[:-8], l) or retry), False
    if os.altsep:
        # retry the comparison with path separators normalized to "/"
        _l = l.replace(b'\\', b'/')
        if el == _l or os.name == 'nt' and el[:-1] + b'\r\n' == _l:
            return True, True
    return retry, True
2154
2156
@staticmethod
def parsehghaveoutput(lines):
    """Parse hghave log lines.

    Return tuple of lists (missing, failed):
    * the missing/unknown features
    * the features for which existence check failed"""
    missing = []
    failed = []
    skip_prefix = TTest.SKIPPED_PREFIX
    fail_prefix = TTest.FAILED_PREFIX
    for line in lines:
        if line.startswith(skip_prefix):
            # keep only the first physical line, drop the prefix
            feature = line.splitlines()[0][len(skip_prefix) :]
            missing.append(_bytes2sys(feature))
        elif line.startswith(fail_prefix):
            feature = line.splitlines()[0][len(fail_prefix) :]
            failed.append(_bytes2sys(feature))

    return missing, failed
2173
2175
@staticmethod
def _escapef(match):
    """Regex-substitution callback: map a matched byte to its escape."""
    return TTest.ESCAPEMAP[match.group(0)]
2177
2179
@staticmethod
def _stringescape(data):
    """Return ``data`` with troublesome bytes rewritten via ESCAPEMAP."""
    return TTest.ESCAPESUB(TTest._escapef, data)
2181
2183
2182
2184
# Serializes writes to the console streams across worker threads (taken
# around every block of stream writes in TestResult below).
iolock = threading.RLock()
# NOTE(review): not acquired anywhere in this chunk; presumably guards
# first-failure handling elsewhere in the file — confirm before relying
# on it.
firstlock = threading.RLock()
# Consulted by TestResult.addOutputMismatch to suppress stale diffs once
# some other test has already failed; set elsewhere in the file.
firsterror = False
2186
2188
2187
2189
class TestResult(unittest._TextTestResult):
    """Holds results when executing via unittest."""

    # Don't worry too much about accessing the non-public _TextTestResult.
    # It is relatively common in Python testing tools.
    def __init__(self, options, *args, **kwargs):
        # ``options`` is the parsed command line namespace; the remaining
        # arguments are forwarded to unittest's _TextTestResult.
        super(TestResult, self).__init__(*args, **kwargs)

        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        # Per-test timing tuples; see stopTest() for the tuple layout.
        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

        if options.color == 'auto':
            isatty = self.stream.isatty()
            # For some reason, redirecting stdout on Windows disables the ANSI
            # color processing of stderr, which is what is used to print the
            # output. Therefore, both must be tty on Windows to enable color.
            if os.name == 'nt':
                isatty = isatty and sys.stdout.isatty()
            self.color = pygmentspresent and isatty
        elif options.color == 'never':
            self.color = False
        else:  # 'always', for testing purposes
            self.color = pygmentspresent

    def onStart(self, test):
        """Can be overriden by custom TestResult"""

    def onEnd(self):
        """Can be overriden by custom TestResult"""

    def addFailure(self, test, reason):
        # NOTE(review): unlike stock unittest, ``reason`` is a plain string
        # (e.g. "timed out"), not an exc_info tuple.
        self.failures.append((test, reason))

        if self._options.first:
            # --first: stop the whole run on the first failure.
            self.stop()
        else:
            with iolock:
                if reason == "timed out":
                    self.stream.write('t')
                else:
                    if not self._options.nodiff:
                        self.stream.write('\n')
                        # Exclude the '\n' from highlighting to lex correctly
                        formatted = 'ERROR: %s output changed\n' % test
                        self.stream.write(highlightmsg(formatted, self.color))
                    self.stream.write('!')

                self.stream.flush()

    def addSuccess(self, test):
        # Only the superclass call touches the shared stream; the
        # successes list append happens outside the lock.
        with iolock:
            super(TestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        super(TestResult, self).addError(test, err)
        if self._options.first:
            self.stop()

    # Polyfill.
    def addSkip(self, test, reason):
        self.skipped.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('skipped %s' % reason)
            else:
                self.stream.write('s')
                self.stream.flush()

    def addIgnore(self, test, reason):
        self.ignored.append((test, reason))
        with iolock:
            if self.showAll:
                self.stream.writeln('ignored %s' % reason)
            else:
                if reason not in ('not retesting', "doesn't match keyword"):
                    self.stream.write('i')
                else:
                    # quietly ignored: still count the test as run
                    self.testsRun += 1
                self.stream.flush()

    def addOutputMismatch(self, test, ret, got, expected):
        """Record a mismatch in test output for a particular test."""
        if self.shouldStop or firsterror:
            # don't print, some other test case already failed and
            # printed, we're just stale and probably failed due to our
            # temp dir getting cleaned up.
            return

        accepted = False
        lines = []

        with iolock:
            if self._options.nodiff:
                pass
            elif self._options.view:
                # show the diff in an external viewer program
                v = self._options.view
                subprocess.call(
                    r'"%s" "%s" "%s"'
                    % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
                    shell=True,
                )
            else:
                servefail, lines = getdiff(
                    expected, got, test.refpath, test.errpath
                )
                self.stream.write('\n')
                for line in lines:
                    line = highlightdiff(line, self.color)
                    if PYTHON3:
                        # write raw bytes through the underlying buffer
                        self.stream.flush()
                        self.stream.buffer.write(line)
                        self.stream.buffer.flush()
                    else:
                        self.stream.write(line)
                        self.stream.flush()

                if servefail:
                    raise test.failureException(
                        'server failed to start (HGPORT=%s)' % test._startport
                    )

            # handle interactive prompt without releasing iolock
            if self._options.interactive:
                if test.readrefout() != expected:
                    self.stream.write(
                        'Reference output has changed (run again to prompt '
                        'changes)'
                    )
                else:
                    self.stream.write('Accept this change? [y/N] ')
                    self.stream.flush()
                    answer = sys.stdin.readline().strip()
                    if answer.lower() in ('y', 'yes'):
                        if test.path.endswith(b'.t'):
                            rename(test.errpath, test.path)
                        else:
                            rename(test.errpath, b'%s.out' % test.path)
                        accepted = True
            if not accepted:
                self.faildata[test.name] = b''.join(lines)

        return accepted

    def startTest(self, test):
        super(TestResult, self).startTest(test)

        # os.times module computes the user time and system time spent by
        # child's processes along with real elapsed time taken by a process.
        # This module has one limitation. It can only work for Linux user
        # and not for Windows. Hence why we fall back to another function
        # for wall time calculations.
        test.started_times = os.times()
        # TODO use a monotonic clock once support for Python 2.7 is dropped.
        test.started_time = time.time()
        if self._firststarttime is None:  # thread racy but irrelevant
            self._firststarttime = test.started_time

    def stopTest(self, test, interrupted=False):
        super(TestResult, self).stopTest(test)

        test.stopped_times = os.times()
        stopped_time = time.time()

        starttime = test.started_times
        endtime = test.stopped_times
        origin = self._firststarttime
        self.times.append(
            (
                test.name,
                endtime[2] - starttime[2],  # user space CPU time
                endtime[3] - starttime[3],  # sys space CPU time
                stopped_time - test.started_time,  # real time
                test.started_time - origin,  # start date in run context
                stopped_time - origin,  # end date in run context
            )
        )

        if interrupted:
            with iolock:
                self.stream.writeln(
                    'INTERRUPTED: %s (after %d seconds)'
                    % (test.name, self.times[-1][3])
                )
2386
2388
2387
2389
def getTestResult():
    """
    Returns the relevant test result
    """
    # Allow callers to substitute their own result class through the
    # CUSTOM_TEST_RESULT environment variable (a module name exposing a
    # TestResult attribute).
    if "CUSTOM_TEST_RESULT" not in os.environ:
        return TestResult
    custom_module = __import__(os.environ["CUSTOM_TEST_RESULT"])
    return custom_module.TestResult
2397
2399
2398
2400
2399 class TestSuite(unittest.TestSuite):
2401 class TestSuite(unittest.TestSuite):
2400 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2402 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2401
2403
def __init__(
    self,
    testdir,
    jobs=1,
    whitelist=None,
    blacklist=None,
    keywords=None,
    loop=False,
    runs_per_test=1,
    loadtest=None,
    showchannels=False,
    *args,
    **kwargs
):
    """Create a new instance that can run tests with a configuration.

    testdir specifies the directory where tests are executed from. This
    is typically the ``tests`` directory from Mercurial's source
    repository.

    jobs specifies the number of jobs to run concurrently. Each test
    executes on its own thread. Tests actually spawn new processes, so
    state mutation should not be an issue.

    If there is only one job, it will use the main thread.

    whitelist and blacklist denote tests that have been whitelisted and
    blacklisted, respectively. These arguments don't belong in TestSuite.
    Instead, whitelist and blacklist should be handled by the thing that
    populates the TestSuite with tests. They are present to preserve
    backwards compatible behavior which reports skipped tests as part
    of the results.

    keywords denotes key words that will be used to filter which tests
    to execute. This arguably belongs outside of TestSuite.

    loop denotes whether to loop over tests forever.

    runs_per_test is how many times each selected test is executed.

    loadtest is a callable used to (re)load a test instance.

    showchannels enables the live per-job activity display.
    """
    super(TestSuite, self).__init__(*args, **kwargs)

    # NOTE(review): ``testdir`` is accepted but not stored by this
    # constructor.
    self._jobs = jobs
    self._whitelist = whitelist
    self._blacklist = blacklist
    self._keywords = keywords
    self._loop = loop
    self._runs_per_test = runs_per_test
    self._loadtest = loadtest
    self._showchannels = showchannels
2450
2452
2451 def run(self, result):
2453 def run(self, result):
2452 # We have a number of filters that need to be applied. We do this
2454 # We have a number of filters that need to be applied. We do this
2453 # here instead of inside Test because it makes the running logic for
2455 # here instead of inside Test because it makes the running logic for
2454 # Test simpler.
2456 # Test simpler.
2455 tests = []
2457 tests = []
2456 num_tests = [0]
2458 num_tests = [0]
2457 for test in self._tests:
2459 for test in self._tests:
2458
2460
2459 def get():
2461 def get():
2460 num_tests[0] += 1
2462 num_tests[0] += 1
2461 if getattr(test, 'should_reload', False):
2463 if getattr(test, 'should_reload', False):
2462 return self._loadtest(test, num_tests[0])
2464 return self._loadtest(test, num_tests[0])
2463 return test
2465 return test
2464
2466
2465 if not os.path.exists(test.path):
2467 if not os.path.exists(test.path):
2466 result.addSkip(test, "Doesn't exist")
2468 result.addSkip(test, "Doesn't exist")
2467 continue
2469 continue
2468
2470
2469 is_whitelisted = self._whitelist and (
2471 is_whitelisted = self._whitelist and (
2470 test.relpath in self._whitelist or test.bname in self._whitelist
2472 test.relpath in self._whitelist or test.bname in self._whitelist
2471 )
2473 )
2472 if not is_whitelisted:
2474 if not is_whitelisted:
2473 is_blacklisted = self._blacklist and (
2475 is_blacklisted = self._blacklist and (
2474 test.relpath in self._blacklist
2476 test.relpath in self._blacklist
2475 or test.bname in self._blacklist
2477 or test.bname in self._blacklist
2476 )
2478 )
2477 if is_blacklisted:
2479 if is_blacklisted:
2478 result.addSkip(test, 'blacklisted')
2480 result.addSkip(test, 'blacklisted')
2479 continue
2481 continue
2480 if self._keywords:
2482 if self._keywords:
2481 with open(test.path, 'rb') as f:
2483 with open(test.path, 'rb') as f:
2482 t = f.read().lower() + test.bname.lower()
2484 t = f.read().lower() + test.bname.lower()
2483 ignored = False
2485 ignored = False
2484 for k in self._keywords.lower().split():
2486 for k in self._keywords.lower().split():
2485 if k not in t:
2487 if k not in t:
2486 result.addIgnore(test, "doesn't match keyword")
2488 result.addIgnore(test, "doesn't match keyword")
2487 ignored = True
2489 ignored = True
2488 break
2490 break
2489
2491
2490 if ignored:
2492 if ignored:
2491 continue
2493 continue
2492 for _ in xrange(self._runs_per_test):
2494 for _ in xrange(self._runs_per_test):
2493 tests.append(get())
2495 tests.append(get())
2494
2496
2495 runtests = list(tests)
2497 runtests = list(tests)
2496 done = queue.Queue()
2498 done = queue.Queue()
2497 running = 0
2499 running = 0
2498
2500
2499 channels = [""] * self._jobs
2501 channels = [""] * self._jobs
2500
2502
2501 def job(test, result):
2503 def job(test, result):
2502 for n, v in enumerate(channels):
2504 for n, v in enumerate(channels):
2503 if not v:
2505 if not v:
2504 channel = n
2506 channel = n
2505 break
2507 break
2506 else:
2508 else:
2507 raise ValueError('Could not find output channel')
2509 raise ValueError('Could not find output channel')
2508 channels[channel] = "=" + test.name[5:].split(".")[0]
2510 channels[channel] = "=" + test.name[5:].split(".")[0]
2509 try:
2511 try:
2510 test(result)
2512 test(result)
2511 done.put(None)
2513 done.put(None)
2512 except KeyboardInterrupt:
2514 except KeyboardInterrupt:
2513 pass
2515 pass
2514 except: # re-raises
2516 except: # re-raises
2515 done.put(('!', test, 'run-test raised an error, see traceback'))
2517 done.put(('!', test, 'run-test raised an error, see traceback'))
2516 raise
2518 raise
2517 finally:
2519 finally:
2518 try:
2520 try:
2519 channels[channel] = ''
2521 channels[channel] = ''
2520 except IndexError:
2522 except IndexError:
2521 pass
2523 pass
2522
2524
2523 def stat():
2525 def stat():
2524 count = 0
2526 count = 0
2525 while channels:
2527 while channels:
2526 d = '\n%03s ' % count
2528 d = '\n%03s ' % count
2527 for n, v in enumerate(channels):
2529 for n, v in enumerate(channels):
2528 if v:
2530 if v:
2529 d += v[0]
2531 d += v[0]
2530 channels[n] = v[1:] or '.'
2532 channels[n] = v[1:] or '.'
2531 else:
2533 else:
2532 d += ' '
2534 d += ' '
2533 d += ' '
2535 d += ' '
2534 with iolock:
2536 with iolock:
2535 sys.stdout.write(d + ' ')
2537 sys.stdout.write(d + ' ')
2536 sys.stdout.flush()
2538 sys.stdout.flush()
2537 for x in xrange(10):
2539 for x in xrange(10):
2538 if channels:
2540 if channels:
2539 time.sleep(0.1)
2541 time.sleep(0.1)
2540 count += 1
2542 count += 1
2541
2543
2542 stoppedearly = False
2544 stoppedearly = False
2543
2545
2544 if self._showchannels:
2546 if self._showchannels:
2545 statthread = threading.Thread(target=stat, name="stat")
2547 statthread = threading.Thread(target=stat, name="stat")
2546 statthread.start()
2548 statthread.start()
2547
2549
2548 try:
2550 try:
2549 while tests or running:
2551 while tests or running:
2550 if not done.empty() or running == self._jobs or not tests:
2552 if not done.empty() or running == self._jobs or not tests:
2551 try:
2553 try:
2552 done.get(True, 1)
2554 done.get(True, 1)
2553 running -= 1
2555 running -= 1
2554 if result and result.shouldStop:
2556 if result and result.shouldStop:
2555 stoppedearly = True
2557 stoppedearly = True
2556 break
2558 break
2557 except queue.Empty:
2559 except queue.Empty:
2558 continue
2560 continue
2559 if tests and not running == self._jobs:
2561 if tests and not running == self._jobs:
2560 test = tests.pop(0)
2562 test = tests.pop(0)
2561 if self._loop:
2563 if self._loop:
2562 if getattr(test, 'should_reload', False):
2564 if getattr(test, 'should_reload', False):
2563 num_tests[0] += 1
2565 num_tests[0] += 1
2564 tests.append(self._loadtest(test, num_tests[0]))
2566 tests.append(self._loadtest(test, num_tests[0]))
2565 else:
2567 else:
2566 tests.append(test)
2568 tests.append(test)
2567 if self._jobs == 1:
2569 if self._jobs == 1:
2568 job(test, result)
2570 job(test, result)
2569 else:
2571 else:
2570 t = threading.Thread(
2572 t = threading.Thread(
2571 target=job, name=test.name, args=(test, result)
2573 target=job, name=test.name, args=(test, result)
2572 )
2574 )
2573 t.start()
2575 t.start()
2574 running += 1
2576 running += 1
2575
2577
2576 # If we stop early we still need to wait on started tests to
2578 # If we stop early we still need to wait on started tests to
2577 # finish. Otherwise, there is a race between the test completing
2579 # finish. Otherwise, there is a race between the test completing
2578 # and the test's cleanup code running. This could result in the
2580 # and the test's cleanup code running. This could result in the
2579 # test reporting incorrect.
2581 # test reporting incorrect.
2580 if stoppedearly:
2582 if stoppedearly:
2581 while running:
2583 while running:
2582 try:
2584 try:
2583 done.get(True, 1)
2585 done.get(True, 1)
2584 running -= 1
2586 running -= 1
2585 except queue.Empty:
2587 except queue.Empty:
2586 continue
2588 continue
2587 except KeyboardInterrupt:
2589 except KeyboardInterrupt:
2588 for test in runtests:
2590 for test in runtests:
2589 test.abort()
2591 test.abort()
2590
2592
2591 channels = []
2593 channels = []
2592
2594
2593 return result
2595 return result
2594
2596
2595
2597
2596 # Save the most recent 5 wall-clock runtimes of each test to a
2598 # Save the most recent 5 wall-clock runtimes of each test to a
2597 # human-readable text file named .testtimes. Tests are sorted
2599 # human-readable text file named .testtimes. Tests are sorted
2598 # alphabetically, while times for each test are listed from oldest to
2600 # alphabetically, while times for each test are listed from oldest to
2599 # newest.
2601 # newest.
2600
2602
2601
2603
2602 def loadtimes(outputdir):
2604 def loadtimes(outputdir):
2603 times = []
2605 times = []
2604 try:
2606 try:
2605 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2607 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2606 for line in fp:
2608 for line in fp:
2607 m = re.match('(.*?) ([0-9. ]+)', line)
2609 m = re.match('(.*?) ([0-9. ]+)', line)
2608 times.append(
2610 times.append(
2609 (m.group(1), [float(t) for t in m.group(2).split()])
2611 (m.group(1), [float(t) for t in m.group(2).split()])
2610 )
2612 )
2611 except IOError as err:
2613 except IOError as err:
2612 if err.errno != errno.ENOENT:
2614 if err.errno != errno.ENOENT:
2613 raise
2615 raise
2614 return times
2616 return times
2615
2617
2616
2618
2617 def savetimes(outputdir, result):
2619 def savetimes(outputdir, result):
2618 saved = dict(loadtimes(outputdir))
2620 saved = dict(loadtimes(outputdir))
2619 maxruns = 5
2621 maxruns = 5
2620 skipped = {str(t[0]) for t in result.skipped}
2622 skipped = {str(t[0]) for t in result.skipped}
2621 for tdata in result.times:
2623 for tdata in result.times:
2622 test, real = tdata[0], tdata[3]
2624 test, real = tdata[0], tdata[3]
2623 if test not in skipped:
2625 if test not in skipped:
2624 ts = saved.setdefault(test, [])
2626 ts = saved.setdefault(test, [])
2625 ts.append(real)
2627 ts.append(real)
2626 ts[:] = ts[-maxruns:]
2628 ts[:] = ts[-maxruns:]
2627
2629
2628 fd, tmpname = tempfile.mkstemp(
2630 fd, tmpname = tempfile.mkstemp(
2629 prefix=b'.testtimes', dir=outputdir, text=True
2631 prefix=b'.testtimes', dir=outputdir, text=True
2630 )
2632 )
2631 with os.fdopen(fd, 'w') as fp:
2633 with os.fdopen(fd, 'w') as fp:
2632 for name, ts in sorted(saved.items()):
2634 for name, ts in sorted(saved.items()):
2633 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2635 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2634 timepath = os.path.join(outputdir, b'.testtimes')
2636 timepath = os.path.join(outputdir, b'.testtimes')
2635 try:
2637 try:
2636 os.unlink(timepath)
2638 os.unlink(timepath)
2637 except OSError:
2639 except OSError:
2638 pass
2640 pass
2639 try:
2641 try:
2640 os.rename(tmpname, timepath)
2642 os.rename(tmpname, timepath)
2641 except OSError:
2643 except OSError:
2642 pass
2644 pass
2643
2645
2644
2646
2645 class TextTestRunner(unittest.TextTestRunner):
2647 class TextTestRunner(unittest.TextTestRunner):
2646 """Custom unittest test runner that uses appropriate settings."""
2648 """Custom unittest test runner that uses appropriate settings."""
2647
2649
2648 def __init__(self, runner, *args, **kwargs):
2650 def __init__(self, runner, *args, **kwargs):
2649 super(TextTestRunner, self).__init__(*args, **kwargs)
2651 super(TextTestRunner, self).__init__(*args, **kwargs)
2650
2652
2651 self._runner = runner
2653 self._runner = runner
2652
2654
2653 self._result = getTestResult()(
2655 self._result = getTestResult()(
2654 self._runner.options, self.stream, self.descriptions, self.verbosity
2656 self._runner.options, self.stream, self.descriptions, self.verbosity
2655 )
2657 )
2656
2658
2657 def listtests(self, test):
2659 def listtests(self, test):
2658 test = sorted(test, key=lambda t: t.name)
2660 test = sorted(test, key=lambda t: t.name)
2659
2661
2660 self._result.onStart(test)
2662 self._result.onStart(test)
2661
2663
2662 for t in test:
2664 for t in test:
2663 print(t.name)
2665 print(t.name)
2664 self._result.addSuccess(t)
2666 self._result.addSuccess(t)
2665
2667
2666 if self._runner.options.xunit:
2668 if self._runner.options.xunit:
2667 with open(self._runner.options.xunit, "wb") as xuf:
2669 with open(self._runner.options.xunit, "wb") as xuf:
2668 self._writexunit(self._result, xuf)
2670 self._writexunit(self._result, xuf)
2669
2671
2670 if self._runner.options.json:
2672 if self._runner.options.json:
2671 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2673 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2672 with open(jsonpath, 'w') as fp:
2674 with open(jsonpath, 'w') as fp:
2673 self._writejson(self._result, fp)
2675 self._writejson(self._result, fp)
2674
2676
2675 return self._result
2677 return self._result
2676
2678
2677 def run(self, test):
2679 def run(self, test):
2678 self._result.onStart(test)
2680 self._result.onStart(test)
2679 test(self._result)
2681 test(self._result)
2680
2682
2681 failed = len(self._result.failures)
2683 failed = len(self._result.failures)
2682 skipped = len(self._result.skipped)
2684 skipped = len(self._result.skipped)
2683 ignored = len(self._result.ignored)
2685 ignored = len(self._result.ignored)
2684
2686
2685 with iolock:
2687 with iolock:
2686 self.stream.writeln('')
2688 self.stream.writeln('')
2687
2689
2688 if not self._runner.options.noskips:
2690 if not self._runner.options.noskips:
2689 for test, msg in sorted(
2691 for test, msg in sorted(
2690 self._result.skipped, key=lambda s: s[0].name
2692 self._result.skipped, key=lambda s: s[0].name
2691 ):
2693 ):
2692 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2694 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2693 msg = highlightmsg(formatted, self._result.color)
2695 msg = highlightmsg(formatted, self._result.color)
2694 self.stream.write(msg)
2696 self.stream.write(msg)
2695 for test, msg in sorted(
2697 for test, msg in sorted(
2696 self._result.failures, key=lambda f: f[0].name
2698 self._result.failures, key=lambda f: f[0].name
2697 ):
2699 ):
2698 formatted = 'Failed %s: %s\n' % (test.name, msg)
2700 formatted = 'Failed %s: %s\n' % (test.name, msg)
2699 self.stream.write(highlightmsg(formatted, self._result.color))
2701 self.stream.write(highlightmsg(formatted, self._result.color))
2700 for test, msg in sorted(
2702 for test, msg in sorted(
2701 self._result.errors, key=lambda e: e[0].name
2703 self._result.errors, key=lambda e: e[0].name
2702 ):
2704 ):
2703 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2705 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2704
2706
2705 if self._runner.options.xunit:
2707 if self._runner.options.xunit:
2706 with open(self._runner.options.xunit, "wb") as xuf:
2708 with open(self._runner.options.xunit, "wb") as xuf:
2707 self._writexunit(self._result, xuf)
2709 self._writexunit(self._result, xuf)
2708
2710
2709 if self._runner.options.json:
2711 if self._runner.options.json:
2710 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2712 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2711 with open(jsonpath, 'w') as fp:
2713 with open(jsonpath, 'w') as fp:
2712 self._writejson(self._result, fp)
2714 self._writejson(self._result, fp)
2713
2715
2714 self._runner._checkhglib('Tested')
2716 self._runner._checkhglib('Tested')
2715
2717
2716 savetimes(self._runner._outputdir, self._result)
2718 savetimes(self._runner._outputdir, self._result)
2717
2719
2718 if failed and self._runner.options.known_good_rev:
2720 if failed and self._runner.options.known_good_rev:
2719 self._bisecttests(t for t, m in self._result.failures)
2721 self._bisecttests(t for t, m in self._result.failures)
2720 self.stream.writeln(
2722 self.stream.writeln(
2721 '# Ran %d tests, %d skipped, %d failed.'
2723 '# Ran %d tests, %d skipped, %d failed.'
2722 % (self._result.testsRun, skipped + ignored, failed)
2724 % (self._result.testsRun, skipped + ignored, failed)
2723 )
2725 )
2724 if failed:
2726 if failed:
2725 self.stream.writeln(
2727 self.stream.writeln(
2726 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2728 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2727 )
2729 )
2728 if self._runner.options.time:
2730 if self._runner.options.time:
2729 self.printtimes(self._result.times)
2731 self.printtimes(self._result.times)
2730
2732
2731 if self._runner.options.exceptions:
2733 if self._runner.options.exceptions:
2732 exceptions = aggregateexceptions(
2734 exceptions = aggregateexceptions(
2733 os.path.join(self._runner._outputdir, b'exceptions')
2735 os.path.join(self._runner._outputdir, b'exceptions')
2734 )
2736 )
2735
2737
2736 self.stream.writeln('Exceptions Report:')
2738 self.stream.writeln('Exceptions Report:')
2737 self.stream.writeln(
2739 self.stream.writeln(
2738 '%d total from %d frames'
2740 '%d total from %d frames'
2739 % (exceptions['total'], len(exceptions['exceptioncounts']))
2741 % (exceptions['total'], len(exceptions['exceptioncounts']))
2740 )
2742 )
2741 combined = exceptions['combined']
2743 combined = exceptions['combined']
2742 for key in sorted(combined, key=combined.get, reverse=True):
2744 for key in sorted(combined, key=combined.get, reverse=True):
2743 frame, line, exc = key
2745 frame, line, exc = key
2744 totalcount, testcount, leastcount, leasttest = combined[key]
2746 totalcount, testcount, leastcount, leasttest = combined[key]
2745
2747
2746 self.stream.writeln(
2748 self.stream.writeln(
2747 '%d (%d tests)\t%s: %s (%s - %d total)'
2749 '%d (%d tests)\t%s: %s (%s - %d total)'
2748 % (
2750 % (
2749 totalcount,
2751 totalcount,
2750 testcount,
2752 testcount,
2751 frame,
2753 frame,
2752 exc,
2754 exc,
2753 leasttest,
2755 leasttest,
2754 leastcount,
2756 leastcount,
2755 )
2757 )
2756 )
2758 )
2757
2759
2758 self.stream.flush()
2760 self.stream.flush()
2759
2761
2760 return self._result
2762 return self._result
2761
2763
2762 def _bisecttests(self, tests):
2764 def _bisecttests(self, tests):
2763 bisectcmd = ['hg', 'bisect']
2765 bisectcmd = ['hg', 'bisect']
2764 bisectrepo = self._runner.options.bisect_repo
2766 bisectrepo = self._runner.options.bisect_repo
2765 if bisectrepo:
2767 if bisectrepo:
2766 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2768 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2767
2769
2768 def pread(args):
2770 def pread(args):
2769 env = os.environ.copy()
2771 env = os.environ.copy()
2770 env['HGPLAIN'] = '1'
2772 env['HGPLAIN'] = '1'
2771 p = subprocess.Popen(
2773 p = subprocess.Popen(
2772 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2774 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2773 )
2775 )
2774 data = p.stdout.read()
2776 data = p.stdout.read()
2775 p.wait()
2777 p.wait()
2776 return data
2778 return data
2777
2779
2778 for test in tests:
2780 for test in tests:
2779 pread(bisectcmd + ['--reset']),
2781 pread(bisectcmd + ['--reset']),
2780 pread(bisectcmd + ['--bad', '.'])
2782 pread(bisectcmd + ['--bad', '.'])
2781 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2783 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2782 # TODO: we probably need to forward more options
2784 # TODO: we probably need to forward more options
2783 # that alter hg's behavior inside the tests.
2785 # that alter hg's behavior inside the tests.
2784 opts = ''
2786 opts = ''
2785 withhg = self._runner.options.with_hg
2787 withhg = self._runner.options.with_hg
2786 if withhg:
2788 if withhg:
2787 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2789 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2788 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2790 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2789 data = pread(bisectcmd + ['--command', rtc])
2791 data = pread(bisectcmd + ['--command', rtc])
2790 m = re.search(
2792 m = re.search(
2791 (
2793 (
2792 br'\nThe first (?P<goodbad>bad|good) revision '
2794 br'\nThe first (?P<goodbad>bad|good) revision '
2793 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2795 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2794 br'summary: +(?P<summary>[^\n]+)\n'
2796 br'summary: +(?P<summary>[^\n]+)\n'
2795 ),
2797 ),
2796 data,
2798 data,
2797 (re.MULTILINE | re.DOTALL),
2799 (re.MULTILINE | re.DOTALL),
2798 )
2800 )
2799 if m is None:
2801 if m is None:
2800 self.stream.writeln(
2802 self.stream.writeln(
2801 'Failed to identify failure point for %s' % test
2803 'Failed to identify failure point for %s' % test
2802 )
2804 )
2803 continue
2805 continue
2804 dat = m.groupdict()
2806 dat = m.groupdict()
2805 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2807 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2806 self.stream.writeln(
2808 self.stream.writeln(
2807 '%s %s by %s (%s)'
2809 '%s %s by %s (%s)'
2808 % (
2810 % (
2809 test,
2811 test,
2810 verb,
2812 verb,
2811 dat['node'].decode('ascii'),
2813 dat['node'].decode('ascii'),
2812 dat['summary'].decode('utf8', 'ignore'),
2814 dat['summary'].decode('utf8', 'ignore'),
2813 )
2815 )
2814 )
2816 )
2815
2817
2816 def printtimes(self, times):
2818 def printtimes(self, times):
2817 # iolock held by run
2819 # iolock held by run
2818 self.stream.writeln('# Producing time report')
2820 self.stream.writeln('# Producing time report')
2819 times.sort(key=lambda t: (t[3]))
2821 times.sort(key=lambda t: (t[3]))
2820 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2822 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2821 self.stream.writeln(
2823 self.stream.writeln(
2822 '%-7s %-7s %-7s %-7s %-7s %s'
2824 '%-7s %-7s %-7s %-7s %-7s %s'
2823 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2825 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2824 )
2826 )
2825 for tdata in times:
2827 for tdata in times:
2826 test = tdata[0]
2828 test = tdata[0]
2827 cuser, csys, real, start, end = tdata[1:6]
2829 cuser, csys, real, start, end = tdata[1:6]
2828 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2830 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2829
2831
2830 @staticmethod
2832 @staticmethod
2831 def _writexunit(result, outf):
2833 def _writexunit(result, outf):
2832 # See http://llg.cubic.org/docs/junit/ for a reference.
2834 # See http://llg.cubic.org/docs/junit/ for a reference.
2833 timesd = {t[0]: t[3] for t in result.times}
2835 timesd = {t[0]: t[3] for t in result.times}
2834 doc = minidom.Document()
2836 doc = minidom.Document()
2835 s = doc.createElement('testsuite')
2837 s = doc.createElement('testsuite')
2836 s.setAttribute('errors', "0") # TODO
2838 s.setAttribute('errors', "0") # TODO
2837 s.setAttribute('failures', str(len(result.failures)))
2839 s.setAttribute('failures', str(len(result.failures)))
2838 s.setAttribute('name', 'run-tests')
2840 s.setAttribute('name', 'run-tests')
2839 s.setAttribute(
2841 s.setAttribute(
2840 'skipped', str(len(result.skipped) + len(result.ignored))
2842 'skipped', str(len(result.skipped) + len(result.ignored))
2841 )
2843 )
2842 s.setAttribute('tests', str(result.testsRun))
2844 s.setAttribute('tests', str(result.testsRun))
2843 doc.appendChild(s)
2845 doc.appendChild(s)
2844 for tc in result.successes:
2846 for tc in result.successes:
2845 t = doc.createElement('testcase')
2847 t = doc.createElement('testcase')
2846 t.setAttribute('name', tc.name)
2848 t.setAttribute('name', tc.name)
2847 tctime = timesd.get(tc.name)
2849 tctime = timesd.get(tc.name)
2848 if tctime is not None:
2850 if tctime is not None:
2849 t.setAttribute('time', '%.3f' % tctime)
2851 t.setAttribute('time', '%.3f' % tctime)
2850 s.appendChild(t)
2852 s.appendChild(t)
2851 for tc, err in sorted(result.faildata.items()):
2853 for tc, err in sorted(result.faildata.items()):
2852 t = doc.createElement('testcase')
2854 t = doc.createElement('testcase')
2853 t.setAttribute('name', tc)
2855 t.setAttribute('name', tc)
2854 tctime = timesd.get(tc)
2856 tctime = timesd.get(tc)
2855 if tctime is not None:
2857 if tctime is not None:
2856 t.setAttribute('time', '%.3f' % tctime)
2858 t.setAttribute('time', '%.3f' % tctime)
2857 # createCDATASection expects a unicode or it will
2859 # createCDATASection expects a unicode or it will
2858 # convert using default conversion rules, which will
2860 # convert using default conversion rules, which will
2859 # fail if string isn't ASCII.
2861 # fail if string isn't ASCII.
2860 err = cdatasafe(err).decode('utf-8', 'replace')
2862 err = cdatasafe(err).decode('utf-8', 'replace')
2861 cd = doc.createCDATASection(err)
2863 cd = doc.createCDATASection(err)
2862 # Use 'failure' here instead of 'error' to match errors = 0,
2864 # Use 'failure' here instead of 'error' to match errors = 0,
2863 # failures = len(result.failures) in the testsuite element.
2865 # failures = len(result.failures) in the testsuite element.
2864 failelem = doc.createElement('failure')
2866 failelem = doc.createElement('failure')
2865 failelem.setAttribute('message', 'output changed')
2867 failelem.setAttribute('message', 'output changed')
2866 failelem.setAttribute('type', 'output-mismatch')
2868 failelem.setAttribute('type', 'output-mismatch')
2867 failelem.appendChild(cd)
2869 failelem.appendChild(cd)
2868 t.appendChild(failelem)
2870 t.appendChild(failelem)
2869 s.appendChild(t)
2871 s.appendChild(t)
2870 for tc, message in result.skipped:
2872 for tc, message in result.skipped:
2871 # According to the schema, 'skipped' has no attributes. So store
2873 # According to the schema, 'skipped' has no attributes. So store
2872 # the skip message as a text node instead.
2874 # the skip message as a text node instead.
2873 t = doc.createElement('testcase')
2875 t = doc.createElement('testcase')
2874 t.setAttribute('name', tc.name)
2876 t.setAttribute('name', tc.name)
2875 binmessage = message.encode('utf-8')
2877 binmessage = message.encode('utf-8')
2876 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2878 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2877 cd = doc.createCDATASection(message)
2879 cd = doc.createCDATASection(message)
2878 skipelem = doc.createElement('skipped')
2880 skipelem = doc.createElement('skipped')
2879 skipelem.appendChild(cd)
2881 skipelem.appendChild(cd)
2880 t.appendChild(skipelem)
2882 t.appendChild(skipelem)
2881 s.appendChild(t)
2883 s.appendChild(t)
2882 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2884 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2883
2885
2884 @staticmethod
2886 @staticmethod
2885 def _writejson(result, outf):
2887 def _writejson(result, outf):
2886 timesd = {}
2888 timesd = {}
2887 for tdata in result.times:
2889 for tdata in result.times:
2888 test = tdata[0]
2890 test = tdata[0]
2889 timesd[test] = tdata[1:]
2891 timesd[test] = tdata[1:]
2890
2892
2891 outcome = {}
2893 outcome = {}
2892 groups = [
2894 groups = [
2893 ('success', ((tc, None) for tc in result.successes)),
2895 ('success', ((tc, None) for tc in result.successes)),
2894 ('failure', result.failures),
2896 ('failure', result.failures),
2895 ('skip', result.skipped),
2897 ('skip', result.skipped),
2896 ]
2898 ]
2897 for res, testcases in groups:
2899 for res, testcases in groups:
2898 for tc, __ in testcases:
2900 for tc, __ in testcases:
2899 if tc.name in timesd:
2901 if tc.name in timesd:
2900 diff = result.faildata.get(tc.name, b'')
2902 diff = result.faildata.get(tc.name, b'')
2901 try:
2903 try:
2902 diff = diff.decode('unicode_escape')
2904 diff = diff.decode('unicode_escape')
2903 except UnicodeDecodeError as e:
2905 except UnicodeDecodeError as e:
2904 diff = '%r decoding diff, sorry' % e
2906 diff = '%r decoding diff, sorry' % e
2905 tres = {
2907 tres = {
2906 'result': res,
2908 'result': res,
2907 'time': ('%0.3f' % timesd[tc.name][2]),
2909 'time': ('%0.3f' % timesd[tc.name][2]),
2908 'cuser': ('%0.3f' % timesd[tc.name][0]),
2910 'cuser': ('%0.3f' % timesd[tc.name][0]),
2909 'csys': ('%0.3f' % timesd[tc.name][1]),
2911 'csys': ('%0.3f' % timesd[tc.name][1]),
2910 'start': ('%0.3f' % timesd[tc.name][3]),
2912 'start': ('%0.3f' % timesd[tc.name][3]),
2911 'end': ('%0.3f' % timesd[tc.name][4]),
2913 'end': ('%0.3f' % timesd[tc.name][4]),
2912 'diff': diff,
2914 'diff': diff,
2913 }
2915 }
2914 else:
2916 else:
2915 # blacklisted test
2917 # blacklisted test
2916 tres = {'result': res}
2918 tres = {'result': res}
2917
2919
2918 outcome[tc.name] = tres
2920 outcome[tc.name] = tres
2919 jsonout = json.dumps(
2921 jsonout = json.dumps(
2920 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2922 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2921 )
2923 )
2922 outf.writelines(("testreport =", jsonout))
2924 outf.writelines(("testreport =", jsonout))
2923
2925
2924
2926
2925 def sorttests(testdescs, previoustimes, shuffle=False):
2927 def sorttests(testdescs, previoustimes, shuffle=False):
2926 """Do an in-place sort of tests."""
2928 """Do an in-place sort of tests."""
2927 if shuffle:
2929 if shuffle:
2928 random.shuffle(testdescs)
2930 random.shuffle(testdescs)
2929 return
2931 return
2930
2932
2931 if previoustimes:
2933 if previoustimes:
2932
2934
2933 def sortkey(f):
2935 def sortkey(f):
2934 f = f['path']
2936 f = f['path']
2935 if f in previoustimes:
2937 if f in previoustimes:
2936 # Use most recent time as estimate
2938 # Use most recent time as estimate
2937 return -(previoustimes[f][-1])
2939 return -(previoustimes[f][-1])
2938 else:
2940 else:
2939 # Default to a rather arbitrary value of 1 second for new tests
2941 # Default to a rather arbitrary value of 1 second for new tests
2940 return -1.0
2942 return -1.0
2941
2943
2942 else:
2944 else:
2943 # keywords for slow tests
2945 # keywords for slow tests
2944 slow = {
2946 slow = {
2945 b'svn': 10,
2947 b'svn': 10,
2946 b'cvs': 10,
2948 b'cvs': 10,
2947 b'hghave': 10,
2949 b'hghave': 10,
2948 b'largefiles-update': 10,
2950 b'largefiles-update': 10,
2949 b'run-tests': 10,
2951 b'run-tests': 10,
2950 b'corruption': 10,
2952 b'corruption': 10,
2951 b'race': 10,
2953 b'race': 10,
2952 b'i18n': 10,
2954 b'i18n': 10,
2953 b'check': 100,
2955 b'check': 100,
2954 b'gendoc': 100,
2956 b'gendoc': 100,
2955 b'contrib-perf': 200,
2957 b'contrib-perf': 200,
2956 b'merge-combination': 100,
2958 b'merge-combination': 100,
2957 }
2959 }
2958 perf = {}
2960 perf = {}
2959
2961
2960 def sortkey(f):
2962 def sortkey(f):
2961 # run largest tests first, as they tend to take the longest
2963 # run largest tests first, as they tend to take the longest
2962 f = f['path']
2964 f = f['path']
2963 try:
2965 try:
2964 return perf[f]
2966 return perf[f]
2965 except KeyError:
2967 except KeyError:
2966 try:
2968 try:
2967 val = -os.stat(f).st_size
2969 val = -os.stat(f).st_size
2968 except OSError as e:
2970 except OSError as e:
2969 if e.errno != errno.ENOENT:
2971 if e.errno != errno.ENOENT:
2970 raise
2972 raise
2971 perf[f] = -1e9 # file does not exist, tell early
2973 perf[f] = -1e9 # file does not exist, tell early
2972 return -1e9
2974 return -1e9
2973 for kw, mul in slow.items():
2975 for kw, mul in slow.items():
2974 if kw in f:
2976 if kw in f:
2975 val *= mul
2977 val *= mul
2976 if f.endswith(b'.py'):
2978 if f.endswith(b'.py'):
2977 val /= 10.0
2979 val /= 10.0
2978 perf[f] = val / 1000.0
2980 perf[f] = val / 1000.0
2979 return perf[f]
2981 return perf[f]
2980
2982
2981 testdescs.sort(key=sortkey)
2983 testdescs.sort(key=sortkey)
2982
2984
2983
2985
2984 class TestRunner(object):
2986 class TestRunner(object):
2985 """Holds context for executing tests.
2987 """Holds context for executing tests.
2986
2988
2987 Tests rely on a lot of state. This object holds it for them.
2989 Tests rely on a lot of state. This object holds it for them.
2988 """
2990 """
2989
2991
2990 # Programs required to run tests.
2992 # Programs required to run tests.
2991 REQUIREDTOOLS = [
2993 REQUIREDTOOLS = [
2992 b'diff',
2994 b'diff',
2993 b'grep',
2995 b'grep',
2994 b'unzip',
2996 b'unzip',
2995 b'gunzip',
2997 b'gunzip',
2996 b'bunzip2',
2998 b'bunzip2',
2997 b'sed',
2999 b'sed',
2998 ]
3000 ]
2999
3001
3000 # Maps file extensions to test class.
3002 # Maps file extensions to test class.
3001 TESTTYPES = [
3003 TESTTYPES = [
3002 (b'.py', PythonTest),
3004 (b'.py', PythonTest),
3003 (b'.t', TTest),
3005 (b'.t', TTest),
3004 ]
3006 ]
3005
3007
3006 def __init__(self):
3008 def __init__(self):
3007 self.options = None
3009 self.options = None
3008 self._hgroot = None
3010 self._hgroot = None
3009 self._testdir = None
3011 self._testdir = None
3010 self._outputdir = None
3012 self._outputdir = None
3011 self._hgtmp = None
3013 self._hgtmp = None
3012 self._installdir = None
3014 self._installdir = None
3013 self._bindir = None
3015 self._bindir = None
3014 self._tmpbindir = None
3016 self._tmpbindir = None
3015 self._pythondir = None
3017 self._pythondir = None
3016 self._coveragefile = None
3018 self._coveragefile = None
3017 self._createdfiles = []
3019 self._createdfiles = []
3018 self._hgcommand = None
3020 self._hgcommand = None
3019 self._hgpath = None
3021 self._hgpath = None
3020 self._portoffset = 0
3022 self._portoffset = 0
3021 self._ports = {}
3023 self._ports = {}
3022
3024
3023 def run(self, args, parser=None):
3025 def run(self, args, parser=None):
3024 """Run the test suite."""
3026 """Run the test suite."""
3025 oldmask = os.umask(0o22)
3027 oldmask = os.umask(0o22)
3026 try:
3028 try:
3027 parser = parser or getparser()
3029 parser = parser or getparser()
3028 options = parseargs(args, parser)
3030 options = parseargs(args, parser)
3029 tests = [_sys2bytes(a) for a in options.tests]
3031 tests = [_sys2bytes(a) for a in options.tests]
3030 if options.test_list is not None:
3032 if options.test_list is not None:
3031 for listfile in options.test_list:
3033 for listfile in options.test_list:
3032 with open(listfile, 'rb') as f:
3034 with open(listfile, 'rb') as f:
3033 tests.extend(t for t in f.read().splitlines() if t)
3035 tests.extend(t for t in f.read().splitlines() if t)
3034 self.options = options
3036 self.options = options
3035
3037
3036 self._checktools()
3038 self._checktools()
3037 testdescs = self.findtests(tests)
3039 testdescs = self.findtests(tests)
3038 if options.profile_runner:
3040 if options.profile_runner:
3039 import statprof
3041 import statprof
3040
3042
3041 statprof.start()
3043 statprof.start()
3042 result = self._run(testdescs)
3044 result = self._run(testdescs)
3043 if options.profile_runner:
3045 if options.profile_runner:
3044 statprof.stop()
3046 statprof.stop()
3045 statprof.display()
3047 statprof.display()
3046 return result
3048 return result
3047
3049
3048 finally:
3050 finally:
3049 os.umask(oldmask)
3051 os.umask(oldmask)
3050
3052
3051 def _run(self, testdescs):
3053 def _run(self, testdescs):
3052 testdir = getcwdb()
3054 testdir = getcwdb()
3053 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3055 self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
3054 # assume all tests in same folder for now
3056 # assume all tests in same folder for now
3055 if testdescs:
3057 if testdescs:
3056 pathname = os.path.dirname(testdescs[0]['path'])
3058 pathname = os.path.dirname(testdescs[0]['path'])
3057 if pathname:
3059 if pathname:
3058 testdir = os.path.join(testdir, pathname)
3060 testdir = os.path.join(testdir, pathname)
3059 self._testdir = osenvironb[b'TESTDIR'] = testdir
3061 self._testdir = osenvironb[b'TESTDIR'] = testdir
3060 if self.options.outputdir:
3062 if self.options.outputdir:
3061 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3063 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3062 else:
3064 else:
3063 self._outputdir = getcwdb()
3065 self._outputdir = getcwdb()
3064 if testdescs and pathname:
3066 if testdescs and pathname:
3065 self._outputdir = os.path.join(self._outputdir, pathname)
3067 self._outputdir = os.path.join(self._outputdir, pathname)
3066 previoustimes = {}
3068 previoustimes = {}
3067 if self.options.order_by_runtime:
3069 if self.options.order_by_runtime:
3068 previoustimes = dict(loadtimes(self._outputdir))
3070 previoustimes = dict(loadtimes(self._outputdir))
3069 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3071 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3070
3072
3071 if 'PYTHONHASHSEED' not in os.environ:
3073 if 'PYTHONHASHSEED' not in os.environ:
3072 # use a random python hash seed all the time
3074 # use a random python hash seed all the time
3073 # we do the randomness ourself to know what seed is used
3075 # we do the randomness ourself to know what seed is used
3074 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3076 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3075
3077
3076 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3078 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3077 # by default, causing thrashing on high-cpu-count systems.
3079 # by default, causing thrashing on high-cpu-count systems.
3078 # Setting its limit to 3 during tests should still let us uncover
3080 # Setting its limit to 3 during tests should still let us uncover
3079 # multi-threading bugs while keeping the thrashing reasonable.
3081 # multi-threading bugs while keeping the thrashing reasonable.
3080 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3082 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3081
3083
3082 if self.options.tmpdir:
3084 if self.options.tmpdir:
3083 self.options.keep_tmpdir = True
3085 self.options.keep_tmpdir = True
3084 tmpdir = _sys2bytes(self.options.tmpdir)
3086 tmpdir = _sys2bytes(self.options.tmpdir)
3085 if os.path.exists(tmpdir):
3087 if os.path.exists(tmpdir):
3086 # Meaning of tmpdir has changed since 1.3: we used to create
3088 # Meaning of tmpdir has changed since 1.3: we used to create
3087 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3089 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3088 # tmpdir already exists.
3090 # tmpdir already exists.
3089 print("error: temp dir %r already exists" % tmpdir)
3091 print("error: temp dir %r already exists" % tmpdir)
3090 return 1
3092 return 1
3091
3093
3092 os.makedirs(tmpdir)
3094 os.makedirs(tmpdir)
3093 else:
3095 else:
3094 d = None
3096 d = None
3095 if os.name == 'nt':
3097 if os.name == 'nt':
3096 # without this, we get the default temp dir location, but
3098 # without this, we get the default temp dir location, but
3097 # in all lowercase, which causes troubles with paths (issue3490)
3099 # in all lowercase, which causes troubles with paths (issue3490)
3098 d = osenvironb.get(b'TMP', None)
3100 d = osenvironb.get(b'TMP', None)
3099 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3101 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3100
3102
3101 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3103 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3102
3104
3103 if self.options.with_hg:
3105 if self.options.with_hg:
3104 self._installdir = None
3106 self._installdir = None
3105 whg = self.options.with_hg
3107 whg = self.options.with_hg
3106 self._bindir = os.path.dirname(os.path.realpath(whg))
3108 self._bindir = os.path.dirname(os.path.realpath(whg))
3107 assert isinstance(self._bindir, bytes)
3109 assert isinstance(self._bindir, bytes)
3108 self._hgcommand = os.path.basename(whg)
3110 self._hgcommand = os.path.basename(whg)
3109 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3111 self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin')
3110 os.makedirs(self._tmpbindir)
3112 os.makedirs(self._tmpbindir)
3111
3113
3112 normbin = os.path.normpath(os.path.abspath(whg))
3114 normbin = os.path.normpath(os.path.abspath(whg))
3113 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3115 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3114
3116
3115 # Other Python scripts in the test harness need to
3117 # Other Python scripts in the test harness need to
3116 # `import mercurial`. If `hg` is a Python script, we assume
3118 # `import mercurial`. If `hg` is a Python script, we assume
3117 # the Mercurial modules are relative to its path and tell the tests
3119 # the Mercurial modules are relative to its path and tell the tests
3118 # to load Python modules from its directory.
3120 # to load Python modules from its directory.
3119 with open(whg, 'rb') as fh:
3121 with open(whg, 'rb') as fh:
3120 initial = fh.read(1024)
3122 initial = fh.read(1024)
3121
3123
3122 if re.match(b'#!.*python', initial):
3124 if re.match(b'#!.*python', initial):
3123 self._pythondir = self._bindir
3125 self._pythondir = self._bindir
3124 # If it looks like our in-repo Rust binary, use the source root.
3126 # If it looks like our in-repo Rust binary, use the source root.
3125 # This is a bit hacky. But rhg is still not supported outside the
3127 # This is a bit hacky. But rhg is still not supported outside the
3126 # source directory. So until it is, do the simple thing.
3128 # source directory. So until it is, do the simple thing.
3127 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3129 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3128 self._pythondir = os.path.dirname(self._testdir)
3130 self._pythondir = os.path.dirname(self._testdir)
3129 # Fall back to the legacy behavior.
3131 # Fall back to the legacy behavior.
3130 else:
3132 else:
3131 self._pythondir = self._bindir
3133 self._pythondir = self._bindir
3132
3134
3133 else:
3135 else:
3134 self._installdir = os.path.join(self._hgtmp, b"install")
3136 self._installdir = os.path.join(self._hgtmp, b"install")
3135 self._bindir = os.path.join(self._installdir, b"bin")
3137 self._bindir = os.path.join(self._installdir, b"bin")
3136 self._hgcommand = b'hg'
3138 self._hgcommand = b'hg'
3137 self._tmpbindir = self._bindir
3139 self._tmpbindir = self._bindir
3138 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3140 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3139
3141
3140 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3142 # Force the use of hg.exe instead of relying on MSYS to recognize hg is
3141 # a python script and feed it to python.exe. Legacy stdio is force
3143 # a python script and feed it to python.exe. Legacy stdio is force
3142 # enabled by hg.exe, and this is a more realistic way to launch hg
3144 # enabled by hg.exe, and this is a more realistic way to launch hg
3143 # anyway.
3145 # anyway.
3144 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3146 if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
3145 self._hgcommand += b'.exe'
3147 self._hgcommand += b'.exe'
3146
3148
3147 # set CHGHG, then replace "hg" command by "chg"
3149 # set CHGHG, then replace "hg" command by "chg"
3148 chgbindir = self._bindir
3150 chgbindir = self._bindir
3149 if self.options.chg or self.options.with_chg:
3151 if self.options.chg or self.options.with_chg:
3150 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3152 osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand)
3151 else:
3153 else:
3152 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3154 osenvironb.pop(b'CHGHG', None) # drop flag for hghave
3153 if self.options.chg:
3155 if self.options.chg:
3154 self._hgcommand = b'chg'
3156 self._hgcommand = b'chg'
3155 elif self.options.with_chg:
3157 elif self.options.with_chg:
3156 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3158 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3157 self._hgcommand = os.path.basename(self.options.with_chg)
3159 self._hgcommand = os.path.basename(self.options.with_chg)
3158
3160
3159 # configure fallback and replace "hg" command by "rhg"
3161 # configure fallback and replace "hg" command by "rhg"
3160 rhgbindir = self._bindir
3162 rhgbindir = self._bindir
3161 if self.options.rhg or self.options.with_rhg:
3163 if self.options.rhg or self.options.with_rhg:
3162 # Affects hghave.py
3164 # Affects hghave.py
3163 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3165 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3164 # Affects configuration. Alternatives would be setting configuration through
3166 # Affects configuration. Alternatives would be setting configuration through
3165 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3167 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3166 # `--config` but that disrupts tests that print command lines and check expected
3168 # `--config` but that disrupts tests that print command lines and check expected
3167 # output.
3169 # output.
3168 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3170 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3169 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3171 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = os.path.join(
3170 self._bindir, self._hgcommand
3172 self._bindir, self._hgcommand
3171 )
3173 )
3172 if self.options.rhg:
3174 if self.options.rhg:
3173 self._hgcommand = b'rhg'
3175 self._hgcommand = b'rhg'
3174 elif self.options.with_rhg:
3176 elif self.options.with_rhg:
3175 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3177 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3176 self._hgcommand = os.path.basename(self.options.with_rhg)
3178 self._hgcommand = os.path.basename(self.options.with_rhg)
3177
3179
3178 osenvironb[b"BINDIR"] = self._bindir
3180 osenvironb[b"BINDIR"] = self._bindir
3179 osenvironb[b"PYTHON"] = PYTHON
3181 osenvironb[b"PYTHON"] = PYTHON
3180
3182
3181 fileb = _sys2bytes(__file__)
3183 fileb = _sys2bytes(__file__)
3182 runtestdir = os.path.abspath(os.path.dirname(fileb))
3184 runtestdir = os.path.abspath(os.path.dirname(fileb))
3183 osenvironb[b'RUNTESTDIR'] = runtestdir
3185 osenvironb[b'RUNTESTDIR'] = runtestdir
3184 if PYTHON3:
3186 if PYTHON3:
3185 sepb = _sys2bytes(os.pathsep)
3187 sepb = _sys2bytes(os.pathsep)
3186 else:
3188 else:
3187 sepb = os.pathsep
3189 sepb = os.pathsep
3188 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3190 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3189 if os.path.islink(__file__):
3191 if os.path.islink(__file__):
3190 # test helper will likely be at the end of the symlink
3192 # test helper will likely be at the end of the symlink
3191 realfile = os.path.realpath(fileb)
3193 realfile = os.path.realpath(fileb)
3192 realdir = os.path.abspath(os.path.dirname(realfile))
3194 realdir = os.path.abspath(os.path.dirname(realfile))
3193 path.insert(2, realdir)
3195 path.insert(2, realdir)
3194 if chgbindir != self._bindir:
3196 if chgbindir != self._bindir:
3195 path.insert(1, chgbindir)
3197 path.insert(1, chgbindir)
3196 if rhgbindir != self._bindir:
3198 if rhgbindir != self._bindir:
3197 path.insert(1, rhgbindir)
3199 path.insert(1, rhgbindir)
3198 if self._testdir != runtestdir:
3200 if self._testdir != runtestdir:
3199 path = [self._testdir] + path
3201 path = [self._testdir] + path
3200 if self._tmpbindir != self._bindir:
3202 if self._tmpbindir != self._bindir:
3201 path = [self._tmpbindir] + path
3203 path = [self._tmpbindir] + path
3202 osenvironb[b"PATH"] = sepb.join(path)
3204 osenvironb[b"PATH"] = sepb.join(path)
3203
3205
3204 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3206 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3205 # can run .../tests/run-tests.py test-foo where test-foo
3207 # can run .../tests/run-tests.py test-foo where test-foo
3206 # adds an extension to HGRC. Also include run-test.py directory to
3208 # adds an extension to HGRC. Also include run-test.py directory to
3207 # import modules like heredoctest.
3209 # import modules like heredoctest.
3208 pypath = [self._pythondir, self._testdir, runtestdir]
3210 pypath = [self._pythondir, self._testdir, runtestdir]
3209 # We have to augment PYTHONPATH, rather than simply replacing
3211 # We have to augment PYTHONPATH, rather than simply replacing
3210 # it, in case external libraries are only available via current
3212 # it, in case external libraries are only available via current
3211 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3213 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3212 # are in /opt/subversion.)
3214 # are in /opt/subversion.)
3213 oldpypath = osenvironb.get(IMPL_PATH)
3215 oldpypath = osenvironb.get(IMPL_PATH)
3214 if oldpypath:
3216 if oldpypath:
3215 pypath.append(oldpypath)
3217 pypath.append(oldpypath)
3216 osenvironb[IMPL_PATH] = sepb.join(pypath)
3218 osenvironb[IMPL_PATH] = sepb.join(pypath)
3217
3219
3218 if self.options.pure:
3220 if self.options.pure:
3219 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3221 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3220 os.environ["HGMODULEPOLICY"] = "py"
3222 os.environ["HGMODULEPOLICY"] = "py"
3221 if self.options.rust:
3223 if self.options.rust:
3222 os.environ["HGMODULEPOLICY"] = "rust+c"
3224 os.environ["HGMODULEPOLICY"] = "rust+c"
3223 if self.options.no_rust:
3225 if self.options.no_rust:
3224 current_policy = os.environ.get("HGMODULEPOLICY", "")
3226 current_policy = os.environ.get("HGMODULEPOLICY", "")
3225 if current_policy.startswith("rust+"):
3227 if current_policy.startswith("rust+"):
3226 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3228 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3227 os.environ.pop("HGWITHRUSTEXT", None)
3229 os.environ.pop("HGWITHRUSTEXT", None)
3228
3230
3229 if self.options.allow_slow_tests:
3231 if self.options.allow_slow_tests:
3230 os.environ["HGTEST_SLOW"] = "slow"
3232 os.environ["HGTEST_SLOW"] = "slow"
3231 elif 'HGTEST_SLOW' in os.environ:
3233 elif 'HGTEST_SLOW' in os.environ:
3232 del os.environ['HGTEST_SLOW']
3234 del os.environ['HGTEST_SLOW']
3233
3235
3234 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3236 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3235
3237
3236 if self.options.exceptions:
3238 if self.options.exceptions:
3237 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3239 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3238 try:
3240 try:
3239 os.makedirs(exceptionsdir)
3241 os.makedirs(exceptionsdir)
3240 except OSError as e:
3242 except OSError as e:
3241 if e.errno != errno.EEXIST:
3243 if e.errno != errno.EEXIST:
3242 raise
3244 raise
3243
3245
3244 # Remove all existing exception reports.
3246 # Remove all existing exception reports.
3245 for f in os.listdir(exceptionsdir):
3247 for f in os.listdir(exceptionsdir):
3246 os.unlink(os.path.join(exceptionsdir, f))
3248 os.unlink(os.path.join(exceptionsdir, f))
3247
3249
3248 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3250 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3249 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3251 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3250 self.options.extra_config_opt.append(
3252 self.options.extra_config_opt.append(
3251 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3253 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3252 )
3254 )
3253
3255
3254 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3256 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3255 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3257 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3256 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3258 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3257 vlog("# Using PATH", os.environ["PATH"])
3259 vlog("# Using PATH", os.environ["PATH"])
3258 vlog(
3260 vlog(
3259 "# Using",
3261 "# Using",
3260 _bytes2sys(IMPL_PATH),
3262 _bytes2sys(IMPL_PATH),
3261 _bytes2sys(osenvironb[IMPL_PATH]),
3263 _bytes2sys(osenvironb[IMPL_PATH]),
3262 )
3264 )
3263 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3265 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3264
3266
3265 try:
3267 try:
3266 return self._runtests(testdescs) or 0
3268 return self._runtests(testdescs) or 0
3267 finally:
3269 finally:
3268 time.sleep(0.1)
3270 time.sleep(0.1)
3269 self._cleanup()
3271 self._cleanup()
3270
3272
3271 def findtests(self, args):
3273 def findtests(self, args):
3272 """Finds possible test files from arguments.
3274 """Finds possible test files from arguments.
3273
3275
3274 If you wish to inject custom tests into the test harness, this would
3276 If you wish to inject custom tests into the test harness, this would
3275 be a good function to monkeypatch or override in a derived class.
3277 be a good function to monkeypatch or override in a derived class.
3276 """
3278 """
3277 if not args:
3279 if not args:
3278 if self.options.changed:
3280 if self.options.changed:
3279 proc = Popen4(
3281 proc = Popen4(
3280 b'hg st --rev "%s" -man0 .'
3282 b'hg st --rev "%s" -man0 .'
3281 % _sys2bytes(self.options.changed),
3283 % _sys2bytes(self.options.changed),
3282 None,
3284 None,
3283 0,
3285 0,
3284 )
3286 )
3285 stdout, stderr = proc.communicate()
3287 stdout, stderr = proc.communicate()
3286 args = stdout.strip(b'\0').split(b'\0')
3288 args = stdout.strip(b'\0').split(b'\0')
3287 else:
3289 else:
3288 args = os.listdir(b'.')
3290 args = os.listdir(b'.')
3289
3291
3290 expanded_args = []
3292 expanded_args = []
3291 for arg in args:
3293 for arg in args:
3292 if os.path.isdir(arg):
3294 if os.path.isdir(arg):
3293 if not arg.endswith(b'/'):
3295 if not arg.endswith(b'/'):
3294 arg += b'/'
3296 arg += b'/'
3295 expanded_args.extend([arg + a for a in os.listdir(arg)])
3297 expanded_args.extend([arg + a for a in os.listdir(arg)])
3296 else:
3298 else:
3297 expanded_args.append(arg)
3299 expanded_args.append(arg)
3298 args = expanded_args
3300 args = expanded_args
3299
3301
3300 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3302 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3301 tests = []
3303 tests = []
3302 for t in args:
3304 for t in args:
3303 case = []
3305 case = []
3304
3306
3305 if not (
3307 if not (
3306 os.path.basename(t).startswith(b'test-')
3308 os.path.basename(t).startswith(b'test-')
3307 and (t.endswith(b'.py') or t.endswith(b'.t'))
3309 and (t.endswith(b'.py') or t.endswith(b'.t'))
3308 ):
3310 ):
3309
3311
3310 m = testcasepattern.match(os.path.basename(t))
3312 m = testcasepattern.match(os.path.basename(t))
3311 if m is not None:
3313 if m is not None:
3312 t_basename, casestr = m.groups()
3314 t_basename, casestr = m.groups()
3313 t = os.path.join(os.path.dirname(t), t_basename)
3315 t = os.path.join(os.path.dirname(t), t_basename)
3314 if casestr:
3316 if casestr:
3315 case = casestr.split(b'#')
3317 case = casestr.split(b'#')
3316 else:
3318 else:
3317 continue
3319 continue
3318
3320
3319 if t.endswith(b'.t'):
3321 if t.endswith(b'.t'):
3320 # .t file may contain multiple test cases
3322 # .t file may contain multiple test cases
3321 casedimensions = parsettestcases(t)
3323 casedimensions = parsettestcases(t)
3322 if casedimensions:
3324 if casedimensions:
3323 cases = []
3325 cases = []
3324
3326
3325 def addcases(case, casedimensions):
3327 def addcases(case, casedimensions):
3326 if not casedimensions:
3328 if not casedimensions:
3327 cases.append(case)
3329 cases.append(case)
3328 else:
3330 else:
3329 for c in casedimensions[0]:
3331 for c in casedimensions[0]:
3330 addcases(case + [c], casedimensions[1:])
3332 addcases(case + [c], casedimensions[1:])
3331
3333
3332 addcases([], casedimensions)
3334 addcases([], casedimensions)
3333 if case and case in cases:
3335 if case and case in cases:
3334 cases = [case]
3336 cases = [case]
3335 elif case:
3337 elif case:
3336 # Ignore invalid cases
3338 # Ignore invalid cases
3337 cases = []
3339 cases = []
3338 else:
3340 else:
3339 pass
3341 pass
3340 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3342 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3341 else:
3343 else:
3342 tests.append({'path': t})
3344 tests.append({'path': t})
3343 else:
3345 else:
3344 tests.append({'path': t})
3346 tests.append({'path': t})
3345
3347
3346 if self.options.retest:
3348 if self.options.retest:
3347 retest_args = []
3349 retest_args = []
3348 for test in tests:
3350 for test in tests:
3349 errpath = self._geterrpath(test)
3351 errpath = self._geterrpath(test)
3350 if os.path.exists(errpath):
3352 if os.path.exists(errpath):
3351 retest_args.append(test)
3353 retest_args.append(test)
3352 tests = retest_args
3354 tests = retest_args
3353 return tests
3355 return tests
3354
3356
3355 def _runtests(self, testdescs):
3357 def _runtests(self, testdescs):
3356 def _reloadtest(test, i):
3358 def _reloadtest(test, i):
3357 # convert a test back to its description dict
3359 # convert a test back to its description dict
3358 desc = {'path': test.path}
3360 desc = {'path': test.path}
3359 case = getattr(test, '_case', [])
3361 case = getattr(test, '_case', [])
3360 if case:
3362 if case:
3361 desc['case'] = case
3363 desc['case'] = case
3362 return self._gettest(desc, i)
3364 return self._gettest(desc, i)
3363
3365
3364 try:
3366 try:
3365 if self.options.restart:
3367 if self.options.restart:
3366 orig = list(testdescs)
3368 orig = list(testdescs)
3367 while testdescs:
3369 while testdescs:
3368 desc = testdescs[0]
3370 desc = testdescs[0]
3369 errpath = self._geterrpath(desc)
3371 errpath = self._geterrpath(desc)
3370 if os.path.exists(errpath):
3372 if os.path.exists(errpath):
3371 break
3373 break
3372 testdescs.pop(0)
3374 testdescs.pop(0)
3373 if not testdescs:
3375 if not testdescs:
3374 print("running all tests")
3376 print("running all tests")
3375 testdescs = orig
3377 testdescs = orig
3376
3378
3377 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3379 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3378 num_tests = len(tests) * self.options.runs_per_test
3380 num_tests = len(tests) * self.options.runs_per_test
3379
3381
3380 jobs = min(num_tests, self.options.jobs)
3382 jobs = min(num_tests, self.options.jobs)
3381
3383
3382 failed = False
3384 failed = False
3383 kws = self.options.keywords
3385 kws = self.options.keywords
3384 if kws is not None and PYTHON3:
3386 if kws is not None and PYTHON3:
3385 kws = kws.encode('utf-8')
3387 kws = kws.encode('utf-8')
3386
3388
3387 suite = TestSuite(
3389 suite = TestSuite(
3388 self._testdir,
3390 self._testdir,
3389 jobs=jobs,
3391 jobs=jobs,
3390 whitelist=self.options.whitelisted,
3392 whitelist=self.options.whitelisted,
3391 blacklist=self.options.blacklist,
3393 blacklist=self.options.blacklist,
3392 keywords=kws,
3394 keywords=kws,
3393 loop=self.options.loop,
3395 loop=self.options.loop,
3394 runs_per_test=self.options.runs_per_test,
3396 runs_per_test=self.options.runs_per_test,
3395 showchannels=self.options.showchannels,
3397 showchannels=self.options.showchannels,
3396 tests=tests,
3398 tests=tests,
3397 loadtest=_reloadtest,
3399 loadtest=_reloadtest,
3398 )
3400 )
3399 verbosity = 1
3401 verbosity = 1
3400 if self.options.list_tests:
3402 if self.options.list_tests:
3401 verbosity = 0
3403 verbosity = 0
3402 elif self.options.verbose:
3404 elif self.options.verbose:
3403 verbosity = 2
3405 verbosity = 2
3404 runner = TextTestRunner(self, verbosity=verbosity)
3406 runner = TextTestRunner(self, verbosity=verbosity)
3405
3407
3406 if self.options.list_tests:
3408 if self.options.list_tests:
3407 result = runner.listtests(suite)
3409 result = runner.listtests(suite)
3408 else:
3410 else:
3409 if self._installdir:
3411 if self._installdir:
3410 self._installhg()
3412 self._installhg()
3411 self._checkhglib("Testing")
3413 self._checkhglib("Testing")
3412 else:
3414 else:
3413 self._usecorrectpython()
3415 self._usecorrectpython()
3414 if self.options.chg:
3416 if self.options.chg:
3415 assert self._installdir
3417 assert self._installdir
3416 self._installchg()
3418 self._installchg()
3417 if self.options.rhg:
3419 if self.options.rhg:
3418 assert self._installdir
3420 assert self._installdir
3419 self._installrhg()
3421 self._installrhg()
3420
3422
3421 log(
3423 log(
3422 'running %d tests using %d parallel processes'
3424 'running %d tests using %d parallel processes'
3423 % (num_tests, jobs)
3425 % (num_tests, jobs)
3424 )
3426 )
3425
3427
3426 result = runner.run(suite)
3428 result = runner.run(suite)
3427
3429
3428 if result.failures or result.errors:
3430 if result.failures or result.errors:
3429 failed = True
3431 failed = True
3430
3432
3431 result.onEnd()
3433 result.onEnd()
3432
3434
3433 if self.options.anycoverage:
3435 if self.options.anycoverage:
3434 self._outputcoverage()
3436 self._outputcoverage()
3435 except KeyboardInterrupt:
3437 except KeyboardInterrupt:
3436 failed = True
3438 failed = True
3437 print("\ninterrupted!")
3439 print("\ninterrupted!")
3438
3440
3439 if failed:
3441 if failed:
3440 return 1
3442 return 1
3441
3443
3442 def _geterrpath(self, test):
3444 def _geterrpath(self, test):
3443 # test['path'] is a relative path
3445 # test['path'] is a relative path
3444 if 'case' in test:
3446 if 'case' in test:
3445 # for multiple dimensions test cases
3447 # for multiple dimensions test cases
3446 casestr = b'#'.join(test['case'])
3448 casestr = b'#'.join(test['case'])
3447 errpath = b'%s#%s.err' % (test['path'], casestr)
3449 errpath = b'%s#%s.err' % (test['path'], casestr)
3448 else:
3450 else:
3449 errpath = b'%s.err' % test['path']
3451 errpath = b'%s.err' % test['path']
3450 if self.options.outputdir:
3452 if self.options.outputdir:
3451 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3453 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3452 errpath = os.path.join(self._outputdir, errpath)
3454 errpath = os.path.join(self._outputdir, errpath)
3453 return errpath
3455 return errpath
3454
3456
3455 def _getport(self, count):
3457 def _getport(self, count):
3456 port = self._ports.get(count) # do we have a cached entry?
3458 port = self._ports.get(count) # do we have a cached entry?
3457 if port is None:
3459 if port is None:
3458 portneeded = 3
3460 portneeded = 3
3459 # above 100 tries we just give up and let test reports failure
3461 # above 100 tries we just give up and let test reports failure
3460 for tries in xrange(100):
3462 for tries in xrange(100):
3461 allfree = True
3463 allfree = True
3462 port = self.options.port + self._portoffset
3464 port = self.options.port + self._portoffset
3463 for idx in xrange(portneeded):
3465 for idx in xrange(portneeded):
3464 if not checkportisavailable(port + idx):
3466 if not checkportisavailable(port + idx):
3465 allfree = False
3467 allfree = False
3466 break
3468 break
3467 self._portoffset += portneeded
3469 self._portoffset += portneeded
3468 if allfree:
3470 if allfree:
3469 break
3471 break
3470 self._ports[count] = port
3472 self._ports[count] = port
3471 return port
3473 return port
3472
3474
3473 def _gettest(self, testdesc, count):
3475 def _gettest(self, testdesc, count):
3474 """Obtain a Test by looking at its filename.
3476 """Obtain a Test by looking at its filename.
3475
3477
3476 Returns a Test instance. The Test may not be runnable if it doesn't
3478 Returns a Test instance. The Test may not be runnable if it doesn't
3477 map to a known type.
3479 map to a known type.
3478 """
3480 """
3479 path = testdesc['path']
3481 path = testdesc['path']
3480 lctest = path.lower()
3482 lctest = path.lower()
3481 testcls = Test
3483 testcls = Test
3482
3484
3483 for ext, cls in self.TESTTYPES:
3485 for ext, cls in self.TESTTYPES:
3484 if lctest.endswith(ext):
3486 if lctest.endswith(ext):
3485 testcls = cls
3487 testcls = cls
3486 break
3488 break
3487
3489
3488 refpath = os.path.join(getcwdb(), path)
3490 refpath = os.path.join(getcwdb(), path)
3489 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3491 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3490
3492
3491 # extra keyword parameters. 'case' is used by .t tests
3493 # extra keyword parameters. 'case' is used by .t tests
3492 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3494 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3493
3495
3494 t = testcls(
3496 t = testcls(
3495 refpath,
3497 refpath,
3496 self._outputdir,
3498 self._outputdir,
3497 tmpdir,
3499 tmpdir,
3498 keeptmpdir=self.options.keep_tmpdir,
3500 keeptmpdir=self.options.keep_tmpdir,
3499 debug=self.options.debug,
3501 debug=self.options.debug,
3500 first=self.options.first,
3502 first=self.options.first,
3501 timeout=self.options.timeout,
3503 timeout=self.options.timeout,
3502 startport=self._getport(count),
3504 startport=self._getport(count),
3503 extraconfigopts=self.options.extra_config_opt,
3505 extraconfigopts=self.options.extra_config_opt,
3504 shell=self.options.shell,
3506 shell=self.options.shell,
3505 hgcommand=self._hgcommand,
3507 hgcommand=self._hgcommand,
3506 usechg=bool(self.options.with_chg or self.options.chg),
3508 usechg=bool(self.options.with_chg or self.options.chg),
3507 chgdebug=self.options.chg_debug,
3509 chgdebug=self.options.chg_debug,
3508 useipv6=useipv6,
3510 useipv6=useipv6,
3509 **kwds
3511 **kwds
3510 )
3512 )
3511 t.should_reload = True
3513 t.should_reload = True
3512 return t
3514 return t
3513
3515
3514 def _cleanup(self):
3516 def _cleanup(self):
3515 """Clean up state from this test invocation."""
3517 """Clean up state from this test invocation."""
3516 if self.options.keep_tmpdir:
3518 if self.options.keep_tmpdir:
3517 return
3519 return
3518
3520
3519 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3521 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3520 shutil.rmtree(self._hgtmp, True)
3522 shutil.rmtree(self._hgtmp, True)
3521 for f in self._createdfiles:
3523 for f in self._createdfiles:
3522 try:
3524 try:
3523 os.remove(f)
3525 os.remove(f)
3524 except OSError:
3526 except OSError:
3525 pass
3527 pass
3526
3528
3527 def _usecorrectpython(self):
3529 def _usecorrectpython(self):
3528 """Configure the environment to use the appropriate Python in tests."""
3530 """Configure the environment to use the appropriate Python in tests."""
3529 # Tests must use the same interpreter as us or bad things will happen.
3531 # Tests must use the same interpreter as us or bad things will happen.
3530 pyexename = sys.platform == 'win32' and b'python.exe' or b'python3'
3532 pyexename = sys.platform == 'win32' and b'python.exe' or b'python3'
3531
3533
3532 # os.symlink() is a thing with py3 on Windows, but it requires
3534 # os.symlink() is a thing with py3 on Windows, but it requires
3533 # Administrator rights.
3535 # Administrator rights.
3534 if getattr(os, 'symlink', None) and os.name != 'nt':
3536 if getattr(os, 'symlink', None) and os.name != 'nt':
3535 vlog(
3537 vlog(
3536 "# Making python executable in test path a symlink to '%s'"
3538 "# Making python executable in test path a symlink to '%s'"
3537 % sysexecutable
3539 % sysexecutable
3538 )
3540 )
3539 mypython = os.path.join(self._tmpbindir, pyexename)
3541 mypython = os.path.join(self._tmpbindir, pyexename)
3540 try:
3542 try:
3541 if os.readlink(mypython) == sysexecutable:
3543 if os.readlink(mypython) == sysexecutable:
3542 return
3544 return
3543 os.unlink(mypython)
3545 os.unlink(mypython)
3544 except OSError as err:
3546 except OSError as err:
3545 if err.errno != errno.ENOENT:
3547 if err.errno != errno.ENOENT:
3546 raise
3548 raise
3547 if self._findprogram(pyexename) != sysexecutable:
3549 if self._findprogram(pyexename) != sysexecutable:
3548 try:
3550 try:
3549 os.symlink(sysexecutable, mypython)
3551 os.symlink(sysexecutable, mypython)
3550 self._createdfiles.append(mypython)
3552 self._createdfiles.append(mypython)
3551 except OSError as err:
3553 except OSError as err:
3552 # child processes may race, which is harmless
3554 # child processes may race, which is harmless
3553 if err.errno != errno.EEXIST:
3555 if err.errno != errno.EEXIST:
3554 raise
3556 raise
3555 else:
3557 else:
3556 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3558 # Windows doesn't have `python3.exe`, and MSYS cannot understand the
3557 # reparse point with that name provided by Microsoft. Create a
3559 # reparse point with that name provided by Microsoft. Create a
3558 # simple script on PATH with that name that delegates to the py3
3560 # simple script on PATH with that name that delegates to the py3
3559 # launcher so the shebang lines work.
3561 # launcher so the shebang lines work.
3560 if os.getenv('MSYSTEM'):
3562 if os.getenv('MSYSTEM'):
3561 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3563 with open(osenvironb[b'RUNTESTDIR'] + b'/python3', 'wb') as f:
3562 f.write(b'#!/bin/sh\n')
3564 f.write(b'#!/bin/sh\n')
3563 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3565 f.write(b'py -3.%d "$@"\n' % sys.version_info[1])
3564
3566
3565 exedir, exename = os.path.split(sysexecutable)
3567 exedir, exename = os.path.split(sysexecutable)
3566 vlog(
3568 vlog(
3567 "# Modifying search path to find %s as %s in '%s'"
3569 "# Modifying search path to find %s as %s in '%s'"
3568 % (exename, pyexename, exedir)
3570 % (exename, pyexename, exedir)
3569 )
3571 )
3570 path = os.environ['PATH'].split(os.pathsep)
3572 path = os.environ['PATH'].split(os.pathsep)
3571 while exedir in path:
3573 while exedir in path:
3572 path.remove(exedir)
3574 path.remove(exedir)
3573
3575
3574 # Binaries installed by pip into the user area like pylint.exe may
3576 # Binaries installed by pip into the user area like pylint.exe may
3575 # not be in PATH by default.
3577 # not be in PATH by default.
3576 extra_paths = [exedir]
3578 extra_paths = [exedir]
3577 vi = sys.version_info
3579 vi = sys.version_info
3578 if 'APPDATA' in os.environ:
3580 if 'APPDATA' in os.environ:
3579 scripts_dir = os.path.join(
3581 scripts_dir = os.path.join(
3580 os.environ['APPDATA'],
3582 os.environ['APPDATA'],
3581 'Python',
3583 'Python',
3582 'Python%d%d' % (vi[0], vi[1]),
3584 'Python%d%d' % (vi[0], vi[1]),
3583 'Scripts',
3585 'Scripts',
3584 )
3586 )
3585
3587
3586 if vi.major == 2:
3588 if vi.major == 2:
3587 scripts_dir = os.path.join(
3589 scripts_dir = os.path.join(
3588 os.environ['APPDATA'],
3590 os.environ['APPDATA'],
3589 'Python',
3591 'Python',
3590 'Scripts',
3592 'Scripts',
3591 )
3593 )
3592
3594
3593 extra_paths.append(scripts_dir)
3595 extra_paths.append(scripts_dir)
3594
3596
3595 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3597 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3596 if not self._findprogram(pyexename):
3598 if not self._findprogram(pyexename):
3597 print("WARNING: Cannot find %s in search path" % pyexename)
3599 print("WARNING: Cannot find %s in search path" % pyexename)
3598
3600
3599 def _installhg(self):
3601 def _installhg(self):
3600 """Install hg into the test environment.
3602 """Install hg into the test environment.
3601
3603
3602 This will also configure hg with the appropriate testing settings.
3604 This will also configure hg with the appropriate testing settings.
3603 """
3605 """
3604 vlog("# Performing temporary installation of HG")
3606 vlog("# Performing temporary installation of HG")
3605 installerrs = os.path.join(self._hgtmp, b"install.err")
3607 installerrs = os.path.join(self._hgtmp, b"install.err")
3606 compiler = ''
3608 compiler = ''
3607 if self.options.compiler:
3609 if self.options.compiler:
3608 compiler = '--compiler ' + self.options.compiler
3610 compiler = '--compiler ' + self.options.compiler
3609 setup_opts = b""
3611 setup_opts = b""
3610 if self.options.pure:
3612 if self.options.pure:
3611 setup_opts = b"--pure"
3613 setup_opts = b"--pure"
3612 elif self.options.rust:
3614 elif self.options.rust:
3613 setup_opts = b"--rust"
3615 setup_opts = b"--rust"
3614 elif self.options.no_rust:
3616 elif self.options.no_rust:
3615 setup_opts = b"--no-rust"
3617 setup_opts = b"--no-rust"
3616
3618
3617 # Run installer in hg root
3619 # Run installer in hg root
3618 script = os.path.realpath(sys.argv[0])
3620 script = os.path.realpath(sys.argv[0])
3619 exe = sysexecutable
3621 exe = sysexecutable
3620 if PYTHON3:
3622 if PYTHON3:
3621 compiler = _sys2bytes(compiler)
3623 compiler = _sys2bytes(compiler)
3622 script = _sys2bytes(script)
3624 script = _sys2bytes(script)
3623 exe = _sys2bytes(exe)
3625 exe = _sys2bytes(exe)
3624 hgroot = os.path.dirname(os.path.dirname(script))
3626 hgroot = os.path.dirname(os.path.dirname(script))
3625 self._hgroot = hgroot
3627 self._hgroot = hgroot
3626 os.chdir(hgroot)
3628 os.chdir(hgroot)
3627 nohome = b'--home=""'
3629 nohome = b'--home=""'
3628 if os.name == 'nt':
3630 if os.name == 'nt':
3629 # The --home="" trick works only on OS where os.sep == '/'
3631 # The --home="" trick works only on OS where os.sep == '/'
3630 # because of a distutils convert_path() fast-path. Avoid it at
3632 # because of a distutils convert_path() fast-path. Avoid it at
3631 # least on Windows for now, deal with .pydistutils.cfg bugs
3633 # least on Windows for now, deal with .pydistutils.cfg bugs
3632 # when they happen.
3634 # when they happen.
3633 nohome = b''
3635 nohome = b''
3634 cmd = (
3636 cmd = (
3635 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3637 b'"%(exe)s" setup.py %(setup_opts)s clean --all'
3636 b' build %(compiler)s --build-base="%(base)s"'
3638 b' build %(compiler)s --build-base="%(base)s"'
3637 b' install --force --prefix="%(prefix)s"'
3639 b' install --force --prefix="%(prefix)s"'
3638 b' --install-lib="%(libdir)s"'
3640 b' --install-lib="%(libdir)s"'
3639 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3641 b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
3640 % {
3642 % {
3641 b'exe': exe,
3643 b'exe': exe,
3642 b'setup_opts': setup_opts,
3644 b'setup_opts': setup_opts,
3643 b'compiler': compiler,
3645 b'compiler': compiler,
3644 b'base': os.path.join(self._hgtmp, b"build"),
3646 b'base': os.path.join(self._hgtmp, b"build"),
3645 b'prefix': self._installdir,
3647 b'prefix': self._installdir,
3646 b'libdir': self._pythondir,
3648 b'libdir': self._pythondir,
3647 b'bindir': self._bindir,
3649 b'bindir': self._bindir,
3648 b'nohome': nohome,
3650 b'nohome': nohome,
3649 b'logfile': installerrs,
3651 b'logfile': installerrs,
3650 }
3652 }
3651 )
3653 )
3652
3654
3653 # setuptools requires install directories to exist.
3655 # setuptools requires install directories to exist.
3654 def makedirs(p):
3656 def makedirs(p):
3655 try:
3657 try:
3656 os.makedirs(p)
3658 os.makedirs(p)
3657 except OSError as e:
3659 except OSError as e:
3658 if e.errno != errno.EEXIST:
3660 if e.errno != errno.EEXIST:
3659 raise
3661 raise
3660
3662
3661 makedirs(self._pythondir)
3663 makedirs(self._pythondir)
3662 makedirs(self._bindir)
3664 makedirs(self._bindir)
3663
3665
3664 vlog("# Running", cmd.decode("utf-8"))
3666 vlog("# Running", cmd.decode("utf-8"))
3665 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3667 if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
3666 if not self.options.verbose:
3668 if not self.options.verbose:
3667 try:
3669 try:
3668 os.remove(installerrs)
3670 os.remove(installerrs)
3669 except OSError as e:
3671 except OSError as e:
3670 if e.errno != errno.ENOENT:
3672 if e.errno != errno.ENOENT:
3671 raise
3673 raise
3672 else:
3674 else:
3673 with open(installerrs, 'rb') as f:
3675 with open(installerrs, 'rb') as f:
3674 for line in f:
3676 for line in f:
3675 if PYTHON3:
3677 if PYTHON3:
3676 sys.stdout.buffer.write(line)
3678 sys.stdout.buffer.write(line)
3677 else:
3679 else:
3678 sys.stdout.write(line)
3680 sys.stdout.write(line)
3679 sys.exit(1)
3681 sys.exit(1)
3680 os.chdir(self._testdir)
3682 os.chdir(self._testdir)
3681
3683
3682 self._usecorrectpython()
3684 self._usecorrectpython()
3683
3685
3684 hgbat = os.path.join(self._bindir, b'hg.bat')
3686 hgbat = os.path.join(self._bindir, b'hg.bat')
3685 if os.path.isfile(hgbat):
3687 if os.path.isfile(hgbat):
3686 # hg.bat expects to be put in bin/scripts while run-tests.py
3688 # hg.bat expects to be put in bin/scripts while run-tests.py
3687 # installation layout put it in bin/ directly. Fix it
3689 # installation layout put it in bin/ directly. Fix it
3688 with open(hgbat, 'rb') as f:
3690 with open(hgbat, 'rb') as f:
3689 data = f.read()
3691 data = f.read()
3690 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3692 if br'"%~dp0..\python" "%~dp0hg" %*' in data:
3691 data = data.replace(
3693 data = data.replace(
3692 br'"%~dp0..\python" "%~dp0hg" %*',
3694 br'"%~dp0..\python" "%~dp0hg" %*',
3693 b'"%~dp0python" "%~dp0hg" %*',
3695 b'"%~dp0python" "%~dp0hg" %*',
3694 )
3696 )
3695 with open(hgbat, 'wb') as f:
3697 with open(hgbat, 'wb') as f:
3696 f.write(data)
3698 f.write(data)
3697 else:
3699 else:
3698 print('WARNING: cannot fix hg.bat reference to python.exe')
3700 print('WARNING: cannot fix hg.bat reference to python.exe')
3699
3701
3700 if self.options.anycoverage:
3702 if self.options.anycoverage:
3701 custom = os.path.join(
3703 custom = os.path.join(
3702 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3704 osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
3703 )
3705 )
3704 target = os.path.join(self._pythondir, b'sitecustomize.py')
3706 target = os.path.join(self._pythondir, b'sitecustomize.py')
3705 vlog('# Installing coverage trigger to %s' % target)
3707 vlog('# Installing coverage trigger to %s' % target)
3706 shutil.copyfile(custom, target)
3708 shutil.copyfile(custom, target)
3707 rc = os.path.join(self._testdir, b'.coveragerc')
3709 rc = os.path.join(self._testdir, b'.coveragerc')
3708 vlog('# Installing coverage rc to %s' % rc)
3710 vlog('# Installing coverage rc to %s' % rc)
3709 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3711 osenvironb[b'COVERAGE_PROCESS_START'] = rc
3710 covdir = os.path.join(self._installdir, b'..', b'coverage')
3712 covdir = os.path.join(self._installdir, b'..', b'coverage')
3711 try:
3713 try:
3712 os.mkdir(covdir)
3714 os.mkdir(covdir)
3713 except OSError as e:
3715 except OSError as e:
3714 if e.errno != errno.EEXIST:
3716 if e.errno != errno.EEXIST:
3715 raise
3717 raise
3716
3718
3717 osenvironb[b'COVERAGE_DIR'] = covdir
3719 osenvironb[b'COVERAGE_DIR'] = covdir
3718
3720
3719 def _checkhglib(self, verb):
3721 def _checkhglib(self, verb):
3720 """Ensure that the 'mercurial' package imported by python is
3722 """Ensure that the 'mercurial' package imported by python is
3721 the one we expect it to be. If not, print a warning to stderr."""
3723 the one we expect it to be. If not, print a warning to stderr."""
3722 if (self._bindir == self._pythondir) and (
3724 if (self._bindir == self._pythondir) and (
3723 self._bindir != self._tmpbindir
3725 self._bindir != self._tmpbindir
3724 ):
3726 ):
3725 # The pythondir has been inferred from --with-hg flag.
3727 # The pythondir has been inferred from --with-hg flag.
3726 # We cannot expect anything sensible here.
3728 # We cannot expect anything sensible here.
3727 return
3729 return
3728 expecthg = os.path.join(self._pythondir, b'mercurial')
3730 expecthg = os.path.join(self._pythondir, b'mercurial')
3729 actualhg = self._gethgpath()
3731 actualhg = self._gethgpath()
3730 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3732 if os.path.abspath(actualhg) != os.path.abspath(expecthg):
3731 sys.stderr.write(
3733 sys.stderr.write(
3732 'warning: %s with unexpected mercurial lib: %s\n'
3734 'warning: %s with unexpected mercurial lib: %s\n'
3733 ' (expected %s)\n' % (verb, actualhg, expecthg)
3735 ' (expected %s)\n' % (verb, actualhg, expecthg)
3734 )
3736 )
3735
3737
3736 def _gethgpath(self):
3738 def _gethgpath(self):
3737 """Return the path to the mercurial package that is actually found by
3739 """Return the path to the mercurial package that is actually found by
3738 the current Python interpreter."""
3740 the current Python interpreter."""
3739 if self._hgpath is not None:
3741 if self._hgpath is not None:
3740 return self._hgpath
3742 return self._hgpath
3741
3743
3742 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3744 cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
3743 cmd = cmd % PYTHON
3745 cmd = cmd % PYTHON
3744 if PYTHON3:
3746 if PYTHON3:
3745 cmd = _bytes2sys(cmd)
3747 cmd = _bytes2sys(cmd)
3746
3748
3747 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3749 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
3748 out, err = p.communicate()
3750 out, err = p.communicate()
3749
3751
3750 self._hgpath = out.strip()
3752 self._hgpath = out.strip()
3751
3753
3752 return self._hgpath
3754 return self._hgpath
3753
3755
3754 def _installchg(self):
3756 def _installchg(self):
3755 """Install chg into the test environment"""
3757 """Install chg into the test environment"""
3756 vlog('# Performing temporary installation of CHG')
3758 vlog('# Performing temporary installation of CHG')
3757 assert os.path.dirname(self._bindir) == self._installdir
3759 assert os.path.dirname(self._bindir) == self._installdir
3758 assert self._hgroot, 'must be called after _installhg()'
3760 assert self._hgroot, 'must be called after _installhg()'
3759 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3761 cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
3760 b'make': b'make', # TODO: switch by option or environment?
3762 b'make': b'make', # TODO: switch by option or environment?
3761 b'prefix': self._installdir,
3763 b'prefix': self._installdir,
3762 }
3764 }
3763 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3765 cwd = os.path.join(self._hgroot, b'contrib', b'chg')
3764 vlog("# Running", cmd)
3766 vlog("# Running", cmd)
3765 proc = subprocess.Popen(
3767 proc = subprocess.Popen(
3766 cmd,
3768 cmd,
3767 shell=True,
3769 shell=True,
3768 cwd=cwd,
3770 cwd=cwd,
3769 stdin=subprocess.PIPE,
3771 stdin=subprocess.PIPE,
3770 stdout=subprocess.PIPE,
3772 stdout=subprocess.PIPE,
3771 stderr=subprocess.STDOUT,
3773 stderr=subprocess.STDOUT,
3772 )
3774 )
3773 out, _err = proc.communicate()
3775 out, _err = proc.communicate()
3774 if proc.returncode != 0:
3776 if proc.returncode != 0:
3775 if PYTHON3:
3777 if PYTHON3:
3776 sys.stdout.buffer.write(out)
3778 sys.stdout.buffer.write(out)
3777 else:
3779 else:
3778 sys.stdout.write(out)
3780 sys.stdout.write(out)
3779 sys.exit(1)
3781 sys.exit(1)
3780
3782
3781 def _installrhg(self):
3783 def _installrhg(self):
3782 """Install rhg into the test environment"""
3784 """Install rhg into the test environment"""
3783 vlog('# Performing temporary installation of rhg')
3785 vlog('# Performing temporary installation of rhg')
3784 assert os.path.dirname(self._bindir) == self._installdir
3786 assert os.path.dirname(self._bindir) == self._installdir
3785 assert self._hgroot, 'must be called after _installhg()'
3787 assert self._hgroot, 'must be called after _installhg()'
3786 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3788 cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
3787 b'make': b'make', # TODO: switch by option or environment?
3789 b'make': b'make', # TODO: switch by option or environment?
3788 b'prefix': self._installdir,
3790 b'prefix': self._installdir,
3789 }
3791 }
3790 cwd = self._hgroot
3792 cwd = self._hgroot
3791 vlog("# Running", cmd)
3793 vlog("# Running", cmd)
3792 proc = subprocess.Popen(
3794 proc = subprocess.Popen(
3793 cmd,
3795 cmd,
3794 shell=True,
3796 shell=True,
3795 cwd=cwd,
3797 cwd=cwd,
3796 stdin=subprocess.PIPE,
3798 stdin=subprocess.PIPE,
3797 stdout=subprocess.PIPE,
3799 stdout=subprocess.PIPE,
3798 stderr=subprocess.STDOUT,
3800 stderr=subprocess.STDOUT,
3799 )
3801 )
3800 out, _err = proc.communicate()
3802 out, _err = proc.communicate()
3801 if proc.returncode != 0:
3803 if proc.returncode != 0:
3802 if PYTHON3:
3804 if PYTHON3:
3803 sys.stdout.buffer.write(out)
3805 sys.stdout.buffer.write(out)
3804 else:
3806 else:
3805 sys.stdout.write(out)
3807 sys.stdout.write(out)
3806 sys.exit(1)
3808 sys.exit(1)
3807
3809
3808 def _outputcoverage(self):
3810 def _outputcoverage(self):
3809 """Produce code coverage output."""
3811 """Produce code coverage output."""
3810 import coverage
3812 import coverage
3811
3813
3812 coverage = coverage.coverage
3814 coverage = coverage.coverage
3813
3815
3814 vlog('# Producing coverage report')
3816 vlog('# Producing coverage report')
3815 # chdir is the easiest way to get short, relative paths in the
3817 # chdir is the easiest way to get short, relative paths in the
3816 # output.
3818 # output.
3817 os.chdir(self._hgroot)
3819 os.chdir(self._hgroot)
3818 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3820 covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
3819 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3821 cov = coverage(data_file=os.path.join(covdir, 'cov'))
3820
3822
3821 # Map install directory paths back to source directory.
3823 # Map install directory paths back to source directory.
3822 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3824 cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]
3823
3825
3824 cov.combine()
3826 cov.combine()
3825
3827
3826 omit = [
3828 omit = [
3827 _bytes2sys(os.path.join(x, b'*'))
3829 _bytes2sys(os.path.join(x, b'*'))
3828 for x in [self._bindir, self._testdir]
3830 for x in [self._bindir, self._testdir]
3829 ]
3831 ]
3830 cov.report(ignore_errors=True, omit=omit)
3832 cov.report(ignore_errors=True, omit=omit)
3831
3833
3832 if self.options.htmlcov:
3834 if self.options.htmlcov:
3833 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3835 htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
3834 cov.html_report(directory=htmldir, omit=omit)
3836 cov.html_report(directory=htmldir, omit=omit)
3835 if self.options.annotate:
3837 if self.options.annotate:
3836 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3838 adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
3837 if not os.path.isdir(adir):
3839 if not os.path.isdir(adir):
3838 os.mkdir(adir)
3840 os.mkdir(adir)
3839 cov.annotate(directory=adir, omit=omit)
3841 cov.annotate(directory=adir, omit=omit)
3840
3842
3841 def _findprogram(self, program):
3843 def _findprogram(self, program):
3842 """Search PATH for a executable program"""
3844 """Search PATH for a executable program"""
3843 dpb = _sys2bytes(os.defpath)
3845 dpb = _sys2bytes(os.defpath)
3844 sepb = _sys2bytes(os.pathsep)
3846 sepb = _sys2bytes(os.pathsep)
3845 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3847 for p in osenvironb.get(b'PATH', dpb).split(sepb):
3846 name = os.path.join(p, program)
3848 name = os.path.join(p, program)
3847 if os.name == 'nt' or os.access(name, os.X_OK):
3849 if os.name == 'nt' or os.access(name, os.X_OK):
3848 return _bytes2sys(name)
3850 return _bytes2sys(name)
3849 return None
3851 return None
3850
3852
3851 def _checktools(self):
3853 def _checktools(self):
3852 """Ensure tools required to run tests are present."""
3854 """Ensure tools required to run tests are present."""
3853 for p in self.REQUIREDTOOLS:
3855 for p in self.REQUIREDTOOLS:
3854 if os.name == 'nt' and not p.endswith(b'.exe'):
3856 if os.name == 'nt' and not p.endswith(b'.exe'):
3855 p += b'.exe'
3857 p += b'.exe'
3856 found = self._findprogram(p)
3858 found = self._findprogram(p)
3857 p = p.decode("utf-8")
3859 p = p.decode("utf-8")
3858 if found:
3860 if found:
3859 vlog("# Found prerequisite", p, "at", found)
3861 vlog("# Found prerequisite", p, "at", found)
3860 else:
3862 else:
3861 print("WARNING: Did not find prerequisite tool: %s " % p)
3863 print("WARNING: Did not find prerequisite tool: %s " % p)
3862
3864
3863
3865
def aggregateexceptions(path):
    """Aggregate the per-test exception dump files found in *path*.

    Each file is expected to hold five NUL-separated fields:
    exception name, main frame, hg frame, hg line, and test name.
    Files with a different field count are silently skipped.

    Returns a dict with exception counts, per-failure/per-test index
    maps, and for every failure the test exhibiting the fewest total
    failures (a good reproduction candidate).
    """
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for entry in os.listdir(path):
        with open(os.path.join(path, entry), 'rb') as fh:
            fields = fh.read().split(b'\0')
        if len(fields) != 5:
            continue

        exc, _mainframe, hgframe, hgline, testname = [
            field.decode('utf-8') for field in fields
        ]

        key = (hgframe, hgline, exc)
        exceptioncounts[key] += 1
        testsbyfailure[key].add(testname)
        failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure. Ties are
    # broken by lexicographically smallest test name, matching the
    # ordering of the tuple comparison below.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        leastfailing[key] = min(
            (len(failuresbytest[t]), t) for t in tests
        )

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {
        key: (
            exceptioncounts[key],
            len(testsbyfailure[key]),
            leastfailing[key][0],
            leastfailing[key][1],
        )
        for key in exceptioncounts
    }

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }
if __name__ == '__main__':
    runner = TestRunner()

    # On Windows, switch the standard streams to binary mode so test
    # output bytes are not mangled by CRLF translation; elsewhere the
    # msvcrt module simply does not exist and this is a no-op.
    try:
        import msvcrt
    except ImportError:
        pass
    else:
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(stream.fileno(), os.O_BINARY)

    sys.exit(runner.run(sys.argv[1:]))
General Comments 0
You need to be logged in to leave comments. Login now