rhg: enable `rhg status` by default, without config or env opt-in...
Simon Sapin -
r49586:29eb80d1 default
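For context, the gate this changeset removes from `status.rs` (second hunk below) was a single boolean lookup through the `Config` API that the first hunk also touches. A minimal sketch of that former opt-in pattern, assuming the `hg` (hg-core) crate and rhg's `CommandError` type as imported in `status.rs`; after this change, `rhg status` runs without any such check:

```rust
use hg::config::Config;

use crate::error::CommandError; // rhg's error type, as used in status.rs

/// Sketch of the opt-in gate deleted in the second hunk: before this
/// changeset, `rhg status` bailed out unless `rhg.status = true` was set
/// (the `RHG_STATUS` environment variable, whose mapping is removed from
/// `config.rs` in the first hunk, fed the same `rhg.status` key).
fn check_status_opt_in(config: &Config) -> Result<(), CommandError> {
    // `get_option` returns `Ok(None)` when the key is absent and `Err`
    // when the value is not a valid boolean.
    let status_enabled = config.get_option(b"rhg", b"status")?;
    if !status_enabled.unwrap_or(false) {
        return Err(CommandError::unsupported(
            "status is experimental in rhg (enable it with 'rhg.status = true' \
             or enable fallback with 'rhg.on-unsupported = fallback')",
        ));
    }
    Ok(())
}
```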
@@ -1,560 +1,559 @@
1 // config.rs
1 // config.rs
2 //
2 //
3 // Copyright 2020
3 // Copyright 2020
4 // Valentin Gatien-Baron,
4 // Valentin Gatien-Baron,
5 // Raphaël Gomès <rgomes@octobus.net>
5 // Raphaël Gomès <rgomes@octobus.net>
6 //
6 //
7 // This software may be used and distributed according to the terms of the
7 // This software may be used and distributed according to the terms of the
8 // GNU General Public License version 2 or any later version.
8 // GNU General Public License version 2 or any later version.
9
9
10 use super::layer;
10 use super::layer;
11 use super::values;
11 use super::values;
12 use crate::config::layer::{
12 use crate::config::layer::{
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
13 ConfigError, ConfigLayer, ConfigOrigin, ConfigValue,
14 };
14 };
15 use crate::utils::files::get_bytes_from_os_str;
15 use crate::utils::files::get_bytes_from_os_str;
16 use format_bytes::{write_bytes, DisplayBytes};
16 use format_bytes::{write_bytes, DisplayBytes};
17 use std::collections::HashSet;
17 use std::collections::HashSet;
18 use std::env;
18 use std::env;
19 use std::fmt;
19 use std::fmt;
20 use std::path::{Path, PathBuf};
20 use std::path::{Path, PathBuf};
21 use std::str;
21 use std::str;
22
22
23 use crate::errors::{HgResultExt, IoResultExt};
23 use crate::errors::{HgResultExt, IoResultExt};
24
24
25 /// Holds the config values for the current repository
25 /// Holds the config values for the current repository
26 /// TODO update this docstring once we support more sources
26 /// TODO update this docstring once we support more sources
27 #[derive(Clone)]
27 #[derive(Clone)]
28 pub struct Config {
28 pub struct Config {
29 layers: Vec<layer::ConfigLayer>,
29 layers: Vec<layer::ConfigLayer>,
30 }
30 }
31
31
32 impl DisplayBytes for Config {
32 impl DisplayBytes for Config {
33 fn display_bytes(
33 fn display_bytes(
34 &self,
34 &self,
35 out: &mut dyn std::io::Write,
35 out: &mut dyn std::io::Write,
36 ) -> std::io::Result<()> {
36 ) -> std::io::Result<()> {
37 for (index, layer) in self.layers.iter().rev().enumerate() {
37 for (index, layer) in self.layers.iter().rev().enumerate() {
38 write_bytes!(
38 write_bytes!(
39 out,
39 out,
40 b"==== Layer {} (trusted: {}) ====\n{}",
40 b"==== Layer {} (trusted: {}) ====\n{}",
41 index,
41 index,
42 if layer.trusted {
42 if layer.trusted {
43 &b"yes"[..]
43 &b"yes"[..]
44 } else {
44 } else {
45 &b"no"[..]
45 &b"no"[..]
46 },
46 },
47 layer
47 layer
48 )?;
48 )?;
49 }
49 }
50 Ok(())
50 Ok(())
51 }
51 }
52 }
52 }
53
53
54 pub enum ConfigSource {
54 pub enum ConfigSource {
55 /// Absolute path to a config file
55 /// Absolute path to a config file
56 AbsPath(PathBuf),
56 AbsPath(PathBuf),
57 /// Already parsed (from the CLI, env, Python resources, etc.)
57 /// Already parsed (from the CLI, env, Python resources, etc.)
58 Parsed(layer::ConfigLayer),
58 Parsed(layer::ConfigLayer),
59 }
59 }
60
60
61 #[derive(Debug)]
61 #[derive(Debug)]
62 pub struct ConfigValueParseError {
62 pub struct ConfigValueParseError {
63 pub origin: ConfigOrigin,
63 pub origin: ConfigOrigin,
64 pub line: Option<usize>,
64 pub line: Option<usize>,
65 pub section: Vec<u8>,
65 pub section: Vec<u8>,
66 pub item: Vec<u8>,
66 pub item: Vec<u8>,
67 pub value: Vec<u8>,
67 pub value: Vec<u8>,
68 pub expected_type: &'static str,
68 pub expected_type: &'static str,
69 }
69 }
70
70
71 impl fmt::Display for ConfigValueParseError {
71 impl fmt::Display for ConfigValueParseError {
72 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
73 // TODO: add origin and line number information, here and in
73 // TODO: add origin and line number information, here and in
74 // corresponding python code
74 // corresponding python code
75 write!(
75 write!(
76 f,
76 f,
77 "config error: {}.{} is not a {} ('{}')",
77 "config error: {}.{} is not a {} ('{}')",
78 String::from_utf8_lossy(&self.section),
78 String::from_utf8_lossy(&self.section),
79 String::from_utf8_lossy(&self.item),
79 String::from_utf8_lossy(&self.item),
80 self.expected_type,
80 self.expected_type,
81 String::from_utf8_lossy(&self.value)
81 String::from_utf8_lossy(&self.value)
82 )
82 )
83 }
83 }
84 }
84 }
85
85
86 impl Config {
86 impl Config {
87 /// The configuration to use when printing configuration-loading errors
87 /// The configuration to use when printing configuration-loading errors
88 pub fn empty() -> Self {
88 pub fn empty() -> Self {
89 Self { layers: Vec::new() }
89 Self { layers: Vec::new() }
90 }
90 }
91
91
92 /// Load system and user configuration from various files.
92 /// Load system and user configuration from various files.
93 ///
93 ///
94 /// This is also affected by some environment variables.
94 /// This is also affected by some environment variables.
95 pub fn load_non_repo() -> Result<Self, ConfigError> {
95 pub fn load_non_repo() -> Result<Self, ConfigError> {
96 let mut config = Self { layers: Vec::new() };
96 let mut config = Self { layers: Vec::new() };
97 let opt_rc_path = env::var_os("HGRCPATH");
97 let opt_rc_path = env::var_os("HGRCPATH");
98 // HGRCPATH replaces system config
98 // HGRCPATH replaces system config
99 if opt_rc_path.is_none() {
99 if opt_rc_path.is_none() {
100 config.add_system_config()?
100 config.add_system_config()?
101 }
101 }
102
102
103 config.add_for_environment_variable("EDITOR", b"ui", b"editor");
103 config.add_for_environment_variable("EDITOR", b"ui", b"editor");
104 config.add_for_environment_variable("VISUAL", b"ui", b"editor");
104 config.add_for_environment_variable("VISUAL", b"ui", b"editor");
105 config.add_for_environment_variable("PAGER", b"pager", b"pager");
105 config.add_for_environment_variable("PAGER", b"pager", b"pager");
106
106
107 // These are set by `run-tests.py --rhg` to enable fallback for the
107 // These are set by `run-tests.py --rhg` to enable fallback for the
108 // entire test suite. Alternatives would be setting configuration
108 // entire test suite. Alternatives would be setting configuration
109 // through `$HGRCPATH` but some tests override that, or changing the
109 // through `$HGRCPATH` but some tests override that, or changing the
110 // `hg` shell alias to include `--config` but that disrupts tests that
110 // `hg` shell alias to include `--config` but that disrupts tests that
111 // print command lines and check expected output.
111 // print command lines and check expected output.
112 config.add_for_environment_variable(
112 config.add_for_environment_variable(
113 "RHG_ON_UNSUPPORTED",
113 "RHG_ON_UNSUPPORTED",
114 b"rhg",
114 b"rhg",
115 b"on-unsupported",
115 b"on-unsupported",
116 );
116 );
117 config.add_for_environment_variable(
117 config.add_for_environment_variable(
118 "RHG_FALLBACK_EXECUTABLE",
118 "RHG_FALLBACK_EXECUTABLE",
119 b"rhg",
119 b"rhg",
120 b"fallback-executable",
120 b"fallback-executable",
121 );
121 );
122 config.add_for_environment_variable("RHG_STATUS", b"rhg", b"status");
123
122
124 // HGRCPATH replaces user config
123 // HGRCPATH replaces user config
125 if opt_rc_path.is_none() {
124 if opt_rc_path.is_none() {
126 config.add_user_config()?
125 config.add_user_config()?
127 }
126 }
128 if let Some(rc_path) = &opt_rc_path {
127 if let Some(rc_path) = &opt_rc_path {
129 for path in env::split_paths(rc_path) {
128 for path in env::split_paths(rc_path) {
130 if !path.as_os_str().is_empty() {
129 if !path.as_os_str().is_empty() {
131 if path.is_dir() {
130 if path.is_dir() {
132 config.add_trusted_dir(&path)?
131 config.add_trusted_dir(&path)?
133 } else {
132 } else {
134 config.add_trusted_file(&path)?
133 config.add_trusted_file(&path)?
135 }
134 }
136 }
135 }
137 }
136 }
138 }
137 }
139 Ok(config)
138 Ok(config)
140 }
139 }
141
140
142 pub fn load_cli_args(
141 pub fn load_cli_args(
143 &mut self,
142 &mut self,
144 cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
143 cli_config_args: impl IntoIterator<Item = impl AsRef<[u8]>>,
145 color_arg: Option<Vec<u8>>,
144 color_arg: Option<Vec<u8>>,
146 ) -> Result<(), ConfigError> {
145 ) -> Result<(), ConfigError> {
147 if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
146 if let Some(layer) = ConfigLayer::parse_cli_args(cli_config_args)? {
148 self.layers.push(layer)
147 self.layers.push(layer)
149 }
148 }
150 if let Some(arg) = color_arg {
149 if let Some(arg) = color_arg {
151 let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
150 let mut layer = ConfigLayer::new(ConfigOrigin::CommandLineColor);
152 layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
151 layer.add(b"ui"[..].into(), b"color"[..].into(), arg, None);
153 self.layers.push(layer)
152 self.layers.push(layer)
154 }
153 }
155 Ok(())
154 Ok(())
156 }
155 }
157
156
158 fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
157 fn add_trusted_dir(&mut self, path: &Path) -> Result<(), ConfigError> {
159 if let Some(entries) = std::fs::read_dir(path)
158 if let Some(entries) = std::fs::read_dir(path)
160 .when_reading_file(path)
159 .when_reading_file(path)
161 .io_not_found_as_none()?
160 .io_not_found_as_none()?
162 {
161 {
163 let mut file_paths = entries
162 let mut file_paths = entries
164 .map(|result| {
163 .map(|result| {
165 result.when_reading_file(path).map(|entry| entry.path())
164 result.when_reading_file(path).map(|entry| entry.path())
166 })
165 })
167 .collect::<Result<Vec<_>, _>>()?;
166 .collect::<Result<Vec<_>, _>>()?;
168 file_paths.sort();
167 file_paths.sort();
169 for file_path in &file_paths {
168 for file_path in &file_paths {
170 if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
169 if file_path.extension() == Some(std::ffi::OsStr::new("rc")) {
171 self.add_trusted_file(&file_path)?
170 self.add_trusted_file(&file_path)?
172 }
171 }
173 }
172 }
174 }
173 }
175 Ok(())
174 Ok(())
176 }
175 }
177
176
178 fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
177 fn add_trusted_file(&mut self, path: &Path) -> Result<(), ConfigError> {
179 if let Some(data) = std::fs::read(path)
178 if let Some(data) = std::fs::read(path)
180 .when_reading_file(path)
179 .when_reading_file(path)
181 .io_not_found_as_none()?
180 .io_not_found_as_none()?
182 {
181 {
183 self.layers.extend(ConfigLayer::parse(path, &data)?)
182 self.layers.extend(ConfigLayer::parse(path, &data)?)
184 }
183 }
185 Ok(())
184 Ok(())
186 }
185 }
187
186
188 fn add_for_environment_variable(
187 fn add_for_environment_variable(
189 &mut self,
188 &mut self,
190 var: &str,
189 var: &str,
191 section: &[u8],
190 section: &[u8],
192 key: &[u8],
191 key: &[u8],
193 ) {
192 ) {
194 if let Some(value) = env::var_os(var) {
193 if let Some(value) = env::var_os(var) {
195 let origin = layer::ConfigOrigin::Environment(var.into());
194 let origin = layer::ConfigOrigin::Environment(var.into());
196 let mut layer = ConfigLayer::new(origin);
195 let mut layer = ConfigLayer::new(origin);
197 layer.add(
196 layer.add(
198 section.to_owned(),
197 section.to_owned(),
199 key.to_owned(),
198 key.to_owned(),
200 get_bytes_from_os_str(value),
199 get_bytes_from_os_str(value),
201 None,
200 None,
202 );
201 );
203 self.layers.push(layer)
202 self.layers.push(layer)
204 }
203 }
205 }
204 }
206
205
207 #[cfg(unix)] // TODO: other platforms
206 #[cfg(unix)] // TODO: other platforms
208 fn add_system_config(&mut self) -> Result<(), ConfigError> {
207 fn add_system_config(&mut self) -> Result<(), ConfigError> {
209 let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
208 let mut add_for_prefix = |prefix: &Path| -> Result<(), ConfigError> {
210 let etc = prefix.join("etc").join("mercurial");
209 let etc = prefix.join("etc").join("mercurial");
211 self.add_trusted_file(&etc.join("hgrc"))?;
210 self.add_trusted_file(&etc.join("hgrc"))?;
212 self.add_trusted_dir(&etc.join("hgrc.d"))
211 self.add_trusted_dir(&etc.join("hgrc.d"))
213 };
212 };
214 let root = Path::new("/");
213 let root = Path::new("/");
215 // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
214 // TODO: use `std::env::args_os().next().unwrap()` a.k.a. argv[0]
216 // instead? TODO: can this be a relative path?
215 // instead? TODO: can this be a relative path?
217 let hg = crate::utils::current_exe()?;
216 let hg = crate::utils::current_exe()?;
218 // TODO: this order (per-installation then per-system) matches
217 // TODO: this order (per-installation then per-system) matches
219 // `systemrcpath()` in `mercurial/scmposix.py`, but
218 // `systemrcpath()` in `mercurial/scmposix.py`, but
220 // `mercurial/helptext/config.txt` suggests it should be reversed
219 // `mercurial/helptext/config.txt` suggests it should be reversed
221 if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
220 if let Some(installation_prefix) = hg.parent().and_then(Path::parent) {
222 if installation_prefix != root {
221 if installation_prefix != root {
223 add_for_prefix(&installation_prefix)?
222 add_for_prefix(&installation_prefix)?
224 }
223 }
225 }
224 }
226 add_for_prefix(root)?;
225 add_for_prefix(root)?;
227 Ok(())
226 Ok(())
228 }
227 }
229
228
230 #[cfg(unix)] // TODO: other platforms
229 #[cfg(unix)] // TODO: other platforms
231 fn add_user_config(&mut self) -> Result<(), ConfigError> {
230 fn add_user_config(&mut self) -> Result<(), ConfigError> {
232 let opt_home = home::home_dir();
231 let opt_home = home::home_dir();
233 if let Some(home) = &opt_home {
232 if let Some(home) = &opt_home {
234 self.add_trusted_file(&home.join(".hgrc"))?
233 self.add_trusted_file(&home.join(".hgrc"))?
235 }
234 }
236 let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
235 let darwin = cfg!(any(target_os = "macos", target_os = "ios"));
237 if !darwin {
236 if !darwin {
238 if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
237 if let Some(config_home) = env::var_os("XDG_CONFIG_HOME")
239 .map(PathBuf::from)
238 .map(PathBuf::from)
240 .or_else(|| opt_home.map(|home| home.join(".config")))
239 .or_else(|| opt_home.map(|home| home.join(".config")))
241 {
240 {
242 self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
241 self.add_trusted_file(&config_home.join("hg").join("hgrc"))?
243 }
242 }
244 }
243 }
245 Ok(())
244 Ok(())
246 }
245 }
247
246
248 /// Loads in order, which means that the precedence is the same
247 /// Loads in order, which means that the precedence is the same
249 /// as the order of `sources`.
248 /// as the order of `sources`.
250 pub fn load_from_explicit_sources(
249 pub fn load_from_explicit_sources(
251 sources: Vec<ConfigSource>,
250 sources: Vec<ConfigSource>,
252 ) -> Result<Self, ConfigError> {
251 ) -> Result<Self, ConfigError> {
253 let mut layers = vec![];
252 let mut layers = vec![];
254
253
255 for source in sources.into_iter() {
254 for source in sources.into_iter() {
256 match source {
255 match source {
257 ConfigSource::Parsed(c) => layers.push(c),
256 ConfigSource::Parsed(c) => layers.push(c),
258 ConfigSource::AbsPath(c) => {
257 ConfigSource::AbsPath(c) => {
259 // TODO check if it should be trusted
258 // TODO check if it should be trusted
260 // mercurial/ui.py:427
259 // mercurial/ui.py:427
261 let data = match std::fs::read(&c) {
260 let data = match std::fs::read(&c) {
262 Err(_) => continue, // same as the python code
261 Err(_) => continue, // same as the python code
263 Ok(data) => data,
262 Ok(data) => data,
264 };
263 };
265 layers.extend(ConfigLayer::parse(&c, &data)?)
264 layers.extend(ConfigLayer::parse(&c, &data)?)
266 }
265 }
267 }
266 }
268 }
267 }
269
268
270 Ok(Config { layers })
269 Ok(Config { layers })
271 }
270 }
272
271
273 /// Loads the per-repository config into a new `Config` which is combined
272 /// Loads the per-repository config into a new `Config` which is combined
274 /// with `self`.
273 /// with `self`.
275 pub(crate) fn combine_with_repo(
274 pub(crate) fn combine_with_repo(
276 &self,
275 &self,
277 repo_config_files: &[PathBuf],
276 repo_config_files: &[PathBuf],
278 ) -> Result<Self, ConfigError> {
277 ) -> Result<Self, ConfigError> {
279 let (cli_layers, other_layers) = self
278 let (cli_layers, other_layers) = self
280 .layers
279 .layers
281 .iter()
280 .iter()
282 .cloned()
281 .cloned()
283 .partition(ConfigLayer::is_from_command_line);
282 .partition(ConfigLayer::is_from_command_line);
284
283
285 let mut repo_config = Self {
284 let mut repo_config = Self {
286 layers: other_layers,
285 layers: other_layers,
287 };
286 };
288 for path in repo_config_files {
287 for path in repo_config_files {
289 // TODO: check if this file should be trusted:
288 // TODO: check if this file should be trusted:
290 // `mercurial/ui.py:427`
289 // `mercurial/ui.py:427`
291 repo_config.add_trusted_file(path)?;
290 repo_config.add_trusted_file(path)?;
292 }
291 }
293 repo_config.layers.extend(cli_layers);
292 repo_config.layers.extend(cli_layers);
294 Ok(repo_config)
293 Ok(repo_config)
295 }
294 }
296
295
297 fn get_parse<'config, T: 'config>(
296 fn get_parse<'config, T: 'config>(
298 &'config self,
297 &'config self,
299 section: &[u8],
298 section: &[u8],
300 item: &[u8],
299 item: &[u8],
301 expected_type: &'static str,
300 expected_type: &'static str,
302 parse: impl Fn(&'config [u8]) -> Option<T>,
301 parse: impl Fn(&'config [u8]) -> Option<T>,
303 ) -> Result<Option<T>, ConfigValueParseError> {
302 ) -> Result<Option<T>, ConfigValueParseError> {
304 match self.get_inner(&section, &item) {
303 match self.get_inner(&section, &item) {
305 Some((layer, v)) => match parse(&v.bytes) {
304 Some((layer, v)) => match parse(&v.bytes) {
306 Some(b) => Ok(Some(b)),
305 Some(b) => Ok(Some(b)),
307 None => Err(ConfigValueParseError {
306 None => Err(ConfigValueParseError {
308 origin: layer.origin.to_owned(),
307 origin: layer.origin.to_owned(),
309 line: v.line,
308 line: v.line,
310 value: v.bytes.to_owned(),
309 value: v.bytes.to_owned(),
311 section: section.to_owned(),
310 section: section.to_owned(),
312 item: item.to_owned(),
311 item: item.to_owned(),
313 expected_type,
312 expected_type,
314 }),
313 }),
315 },
314 },
316 None => Ok(None),
315 None => Ok(None),
317 }
316 }
318 }
317 }
319
318
320 /// Returns an `Err` if the first value found is not a valid UTF-8 string.
319 /// Returns an `Err` if the first value found is not a valid UTF-8 string.
321 /// Otherwise, returns an `Ok(value)` if found, or `None`.
320 /// Otherwise, returns an `Ok(value)` if found, or `None`.
322 pub fn get_str(
321 pub fn get_str(
323 &self,
322 &self,
324 section: &[u8],
323 section: &[u8],
325 item: &[u8],
324 item: &[u8],
326 ) -> Result<Option<&str>, ConfigValueParseError> {
325 ) -> Result<Option<&str>, ConfigValueParseError> {
327 self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
326 self.get_parse(section, item, "ASCII or UTF-8 string", |value| {
328 str::from_utf8(value).ok()
327 str::from_utf8(value).ok()
329 })
328 })
330 }
329 }
331
330
332 /// Returns an `Err` if the first value found is not a valid unsigned
331 /// Returns an `Err` if the first value found is not a valid unsigned
333 /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
332 /// integer. Otherwise, returns an `Ok(value)` if found, or `None`.
334 pub fn get_u32(
333 pub fn get_u32(
335 &self,
334 &self,
336 section: &[u8],
335 section: &[u8],
337 item: &[u8],
336 item: &[u8],
338 ) -> Result<Option<u32>, ConfigValueParseError> {
337 ) -> Result<Option<u32>, ConfigValueParseError> {
339 self.get_parse(section, item, "valid integer", |value| {
338 self.get_parse(section, item, "valid integer", |value| {
340 str::from_utf8(value).ok()?.parse().ok()
339 str::from_utf8(value).ok()?.parse().ok()
341 })
340 })
342 }
341 }
343
342
344 /// Returns an `Err` if the first value found is not a valid file size
343 /// Returns an `Err` if the first value found is not a valid file size
345 /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
344 /// value such as `30` (default unit is bytes), `7 MB`, or `42.5 kb`.
346 /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
345 /// Otherwise, returns an `Ok(value_in_bytes)` if found, or `None`.
347 pub fn get_byte_size(
346 pub fn get_byte_size(
348 &self,
347 &self,
349 section: &[u8],
348 section: &[u8],
350 item: &[u8],
349 item: &[u8],
351 ) -> Result<Option<u64>, ConfigValueParseError> {
350 ) -> Result<Option<u64>, ConfigValueParseError> {
352 self.get_parse(section, item, "byte quantity", values::parse_byte_size)
351 self.get_parse(section, item, "byte quantity", values::parse_byte_size)
353 }
352 }
354
353
355 /// Returns an `Err` if the first value found is not a valid boolean.
354 /// Returns an `Err` if the first value found is not a valid boolean.
356 /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
355 /// Otherwise, returns an `Ok(option)`, where `option` is the boolean if
357 /// found, or `None`.
356 /// found, or `None`.
358 pub fn get_option(
357 pub fn get_option(
359 &self,
358 &self,
360 section: &[u8],
359 section: &[u8],
361 item: &[u8],
360 item: &[u8],
362 ) -> Result<Option<bool>, ConfigValueParseError> {
361 ) -> Result<Option<bool>, ConfigValueParseError> {
363 self.get_parse(section, item, "boolean", values::parse_bool)
362 self.get_parse(section, item, "boolean", values::parse_bool)
364 }
363 }
365
364
366 /// Returns the corresponding boolean in the config. Returns `Ok(false)`
365 /// Returns the corresponding boolean in the config. Returns `Ok(false)`
367 /// if the value is not found, an `Err` if it's not a valid boolean.
366 /// if the value is not found, an `Err` if it's not a valid boolean.
368 pub fn get_bool(
367 pub fn get_bool(
369 &self,
368 &self,
370 section: &[u8],
369 section: &[u8],
371 item: &[u8],
370 item: &[u8],
372 ) -> Result<bool, ConfigValueParseError> {
371 ) -> Result<bool, ConfigValueParseError> {
373 Ok(self.get_option(section, item)?.unwrap_or(false))
372 Ok(self.get_option(section, item)?.unwrap_or(false))
374 }
373 }
375
374
376 /// Returns `true` if the extension is enabled, `false` otherwise
375 /// Returns `true` if the extension is enabled, `false` otherwise
377 pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
376 pub fn is_extension_enabled(&self, extension: &[u8]) -> bool {
378 let value = self.get(b"extensions", extension);
377 let value = self.get(b"extensions", extension);
379 match value {
378 match value {
380 Some(c) => !c.starts_with(b"!"),
379 Some(c) => !c.starts_with(b"!"),
381 None => false,
380 None => false,
382 }
381 }
383 }
382 }
384
383
385 /// If there is an `item` value in `section`, parse and return a list of
384 /// If there is an `item` value in `section`, parse and return a list of
386 /// byte strings.
385 /// byte strings.
387 pub fn get_list(
386 pub fn get_list(
388 &self,
387 &self,
389 section: &[u8],
388 section: &[u8],
390 item: &[u8],
389 item: &[u8],
391 ) -> Option<Vec<Vec<u8>>> {
390 ) -> Option<Vec<Vec<u8>>> {
392 self.get(section, item).map(values::parse_list)
391 self.get(section, item).map(values::parse_list)
393 }
392 }
394
393
395 /// Returns the raw value bytes of the first one found, or `None`.
394 /// Returns the raw value bytes of the first one found, or `None`.
396 pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
395 pub fn get(&self, section: &[u8], item: &[u8]) -> Option<&[u8]> {
397 self.get_inner(section, item)
396 self.get_inner(section, item)
398 .map(|(_, value)| value.bytes.as_ref())
397 .map(|(_, value)| value.bytes.as_ref())
399 }
398 }
400
399
401 /// Returns the raw value bytes of the first one found, or `None`.
400 /// Returns the raw value bytes of the first one found, or `None`.
402 pub fn get_with_origin(
401 pub fn get_with_origin(
403 &self,
402 &self,
404 section: &[u8],
403 section: &[u8],
405 item: &[u8],
404 item: &[u8],
406 ) -> Option<(&[u8], &ConfigOrigin)> {
405 ) -> Option<(&[u8], &ConfigOrigin)> {
407 self.get_inner(section, item)
406 self.get_inner(section, item)
408 .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
407 .map(|(layer, value)| (value.bytes.as_ref(), &layer.origin))
409 }
408 }
410
409
411 /// Returns the layer and the value of the first one found, or `None`.
410 /// Returns the layer and the value of the first one found, or `None`.
412 fn get_inner(
411 fn get_inner(
413 &self,
412 &self,
414 section: &[u8],
413 section: &[u8],
415 item: &[u8],
414 item: &[u8],
416 ) -> Option<(&ConfigLayer, &ConfigValue)> {
415 ) -> Option<(&ConfigLayer, &ConfigValue)> {
417 for layer in self.layers.iter().rev() {
416 for layer in self.layers.iter().rev() {
418 if !layer.trusted {
417 if !layer.trusted {
419 continue;
418 continue;
420 }
419 }
421 if let Some(v) = layer.get(&section, &item) {
420 if let Some(v) = layer.get(&section, &item) {
422 return Some((&layer, v));
421 return Some((&layer, v));
423 }
422 }
424 }
423 }
425 None
424 None
426 }
425 }
427
426
428 /// Return all keys defined for the given section
427 /// Return all keys defined for the given section
429 pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
428 pub fn get_section_keys(&self, section: &[u8]) -> HashSet<&[u8]> {
430 self.layers
429 self.layers
431 .iter()
430 .iter()
432 .flat_map(|layer| layer.iter_keys(section))
431 .flat_map(|layer| layer.iter_keys(section))
433 .collect()
432 .collect()
434 }
433 }
435
434
436 /// Returns whether any key is defined in the given section
435 /// Returns whether any key is defined in the given section
437 pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
436 pub fn has_non_empty_section(&self, section: &[u8]) -> bool {
438 self.layers
437 self.layers
439 .iter()
438 .iter()
440 .any(|layer| layer.has_non_empty_section(section))
439 .any(|layer| layer.has_non_empty_section(section))
441 }
440 }
442
441
443 /// Yields (key, value) pairs for everything in the given section
442 /// Yields (key, value) pairs for everything in the given section
444 pub fn iter_section<'a>(
443 pub fn iter_section<'a>(
445 &'a self,
444 &'a self,
446 section: &'a [u8],
445 section: &'a [u8],
447 ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
446 ) -> impl Iterator<Item = (&[u8], &[u8])> + 'a {
448 // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is
447 // TODO: Use `Iterator`’s `.peekable()` when its `peek_mut` is
449 // available:
448 // available:
450 // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut
449 // https://doc.rust-lang.org/nightly/std/iter/struct.Peekable.html#method.peek_mut
451 struct Peekable<I: Iterator> {
450 struct Peekable<I: Iterator> {
452 iter: I,
451 iter: I,
453 /// Remember a peeked value, even if it was None.
452 /// Remember a peeked value, even if it was None.
454 peeked: Option<Option<I::Item>>,
453 peeked: Option<Option<I::Item>>,
455 }
454 }
456
455
457 impl<I: Iterator> Peekable<I> {
456 impl<I: Iterator> Peekable<I> {
458 fn new(iter: I) -> Self {
457 fn new(iter: I) -> Self {
459 Self { iter, peeked: None }
458 Self { iter, peeked: None }
460 }
459 }
461
460
462 fn next(&mut self) {
461 fn next(&mut self) {
463 self.peeked = None
462 self.peeked = None
464 }
463 }
465
464
466 fn peek_mut(&mut self) -> Option<&mut I::Item> {
465 fn peek_mut(&mut self) -> Option<&mut I::Item> {
467 let iter = &mut self.iter;
466 let iter = &mut self.iter;
468 self.peeked.get_or_insert_with(|| iter.next()).as_mut()
467 self.peeked.get_or_insert_with(|| iter.next()).as_mut()
469 }
468 }
470 }
469 }
471
470
472 // Deduplicate keys redefined in multiple layers
471 // Deduplicate keys redefined in multiple layers
473 let mut keys_already_seen = HashSet::new();
472 let mut keys_already_seen = HashSet::new();
474 let mut key_is_new =
473 let mut key_is_new =
475 move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
474 move |&(key, _value): &(&'a [u8], &'a [u8])| -> bool {
476 keys_already_seen.insert(key)
475 keys_already_seen.insert(key)
477 };
476 };
478 // This is similar to `flat_map` + `filter_map`, except with a single
477 // This is similar to `flat_map` + `filter_map`, except with a single
479 // closure that owns `key_is_new` (and therefore the
478 // closure that owns `key_is_new` (and therefore the
480 // `keys_already_seen` set):
479 // `keys_already_seen` set):
481 let mut layer_iters = Peekable::new(
480 let mut layer_iters = Peekable::new(
482 self.layers
481 self.layers
483 .iter()
482 .iter()
484 .rev()
483 .rev()
485 .map(move |layer| layer.iter_section(section)),
484 .map(move |layer| layer.iter_section(section)),
486 );
485 );
487 std::iter::from_fn(move || loop {
486 std::iter::from_fn(move || loop {
488 if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
487 if let Some(pair) = layer_iters.peek_mut()?.find(&mut key_is_new) {
489 return Some(pair);
488 return Some(pair);
490 } else {
489 } else {
491 layer_iters.next();
490 layer_iters.next();
492 }
491 }
493 })
492 })
494 }
493 }
495
494
496 /// Get raw values bytes from all layers (even untrusted ones) in order
495 /// Get raw values bytes from all layers (even untrusted ones) in order
497 /// of precedence.
496 /// of precedence.
498 #[cfg(test)]
497 #[cfg(test)]
499 fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
498 fn get_all(&self, section: &[u8], item: &[u8]) -> Vec<&[u8]> {
500 let mut res = vec![];
499 let mut res = vec![];
501 for layer in self.layers.iter().rev() {
500 for layer in self.layers.iter().rev() {
502 if let Some(v) = layer.get(&section, &item) {
501 if let Some(v) = layer.get(&section, &item) {
503 res.push(v.bytes.as_ref());
502 res.push(v.bytes.as_ref());
504 }
503 }
505 }
504 }
506 res
505 res
507 }
506 }
508 }
507 }
509
508
510 #[cfg(test)]
509 #[cfg(test)]
511 mod tests {
510 mod tests {
512 use super::*;
511 use super::*;
513 use pretty_assertions::assert_eq;
512 use pretty_assertions::assert_eq;
514 use std::fs::File;
513 use std::fs::File;
515 use std::io::Write;
514 use std::io::Write;
516
515
517 #[test]
516 #[test]
518 fn test_include_layer_ordering() {
517 fn test_include_layer_ordering() {
519 let tmpdir = tempfile::tempdir().unwrap();
518 let tmpdir = tempfile::tempdir().unwrap();
520 let tmpdir_path = tmpdir.path();
519 let tmpdir_path = tmpdir.path();
521 let mut included_file =
520 let mut included_file =
522 File::create(&tmpdir_path.join("included.rc")).unwrap();
521 File::create(&tmpdir_path.join("included.rc")).unwrap();
523
522
524 included_file.write_all(b"[section]\nitem=value1").unwrap();
523 included_file.write_all(b"[section]\nitem=value1").unwrap();
525 let base_config_path = tmpdir_path.join("base.rc");
524 let base_config_path = tmpdir_path.join("base.rc");
526 let mut config_file = File::create(&base_config_path).unwrap();
525 let mut config_file = File::create(&base_config_path).unwrap();
527 let data =
526 let data =
528 b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
527 b"[section]\nitem=value0\n%include included.rc\nitem=value2\n\
529 [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
528 [section2]\ncount = 4\nsize = 1.5 KB\nnot-count = 1.5\nnot-size = 1 ub";
530 config_file.write_all(data).unwrap();
529 config_file.write_all(data).unwrap();
531
530
532 let sources = vec![ConfigSource::AbsPath(base_config_path)];
531 let sources = vec![ConfigSource::AbsPath(base_config_path)];
533 let config = Config::load_from_explicit_sources(sources)
532 let config = Config::load_from_explicit_sources(sources)
534 .expect("expected valid config");
533 .expect("expected valid config");
535
534
536 let (_, value) = config.get_inner(b"section", b"item").unwrap();
535 let (_, value) = config.get_inner(b"section", b"item").unwrap();
537 assert_eq!(
536 assert_eq!(
538 value,
537 value,
539 &ConfigValue {
538 &ConfigValue {
540 bytes: b"value2".to_vec(),
539 bytes: b"value2".to_vec(),
541 line: Some(4)
540 line: Some(4)
542 }
541 }
543 );
542 );
544
543
545 let value = config.get(b"section", b"item").unwrap();
544 let value = config.get(b"section", b"item").unwrap();
546 assert_eq!(value, b"value2",);
545 assert_eq!(value, b"value2",);
547 assert_eq!(
546 assert_eq!(
548 config.get_all(b"section", b"item"),
547 config.get_all(b"section", b"item"),
549 [b"value2", b"value1", b"value0"]
548 [b"value2", b"value1", b"value0"]
550 );
549 );
551
550
552 assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
551 assert_eq!(config.get_u32(b"section2", b"count").unwrap(), Some(4));
553 assert_eq!(
552 assert_eq!(
554 config.get_byte_size(b"section2", b"size").unwrap(),
553 config.get_byte_size(b"section2", b"size").unwrap(),
555 Some(1024 + 512)
554 Some(1024 + 512)
556 );
555 );
557 assert!(config.get_u32(b"section2", b"not-count").is_err());
556 assert!(config.get_u32(b"section2", b"not-count").is_err());
558 assert!(config.get_byte_size(b"section2", b"not-size").is_err());
557 assert!(config.get_byte_size(b"section2", b"not-size").is_err());
559 }
558 }
560 }
559 }
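The `test_include_layer_ordering` test above exercises these lookups end to end. As a condensed sketch of the same `Config` API from calling code, assuming the `hg` (hg-core) crate and a hypothetical `base.rc` path that is not part of this changeset:

```rust
use std::path::PathBuf;

use hg::config::{Config, ConfigSource};

// Condensed sketch of the lookup API shown above; `base_rc` points at a
// hypothetical config file, not anything from this changeset.
fn read_some_values(base_rc: PathBuf) {
    // Later sources take precedence over earlier ones, as documented on
    // `load_from_explicit_sources`.
    let sources = vec![ConfigSource::AbsPath(base_rc)];
    let config =
        Config::load_from_explicit_sources(sources).expect("valid config");

    // Missing keys come back as `Ok(None)` (or `Ok(false)` for `get_bool`);
    // values that fail to parse come back as `Err(ConfigValueParseError)`.
    let count = config.get_u32(b"section2", b"count").expect("an integer");
    let size = config.get_byte_size(b"section2", b"size").expect("a byte size");
    let enabled = config.get_bool(b"rhg", b"status").expect("a boolean");
    println!("count={:?} size={:?} enabled={}", count, size, enabled);
}
```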
@@ -1,539 +1,530 @@
1 // status.rs
1 // status.rs
2 //
2 //
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::error::CommandError;
8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::Ui;
10 use crate::utils::path_utils::RelativizePaths;
10 use crate::utils::path_utils::RelativizePaths;
11 use clap::{Arg, SubCommand};
11 use clap::{Arg, SubCommand};
12 use format_bytes::format_bytes;
12 use format_bytes::format_bytes;
13 use hg;
13 use hg;
14 use hg::config::Config;
14 use hg::config::Config;
15 use hg::dirstate::has_exec_bit;
15 use hg::dirstate::has_exec_bit;
16 use hg::dirstate::status::StatusPath;
16 use hg::dirstate::status::StatusPath;
17 use hg::dirstate::TruncatedTimestamp;
17 use hg::dirstate::TruncatedTimestamp;
18 use hg::dirstate::RANGE_MASK_31BIT;
18 use hg::dirstate::RANGE_MASK_31BIT;
19 use hg::errors::{HgError, IoResultExt};
19 use hg::errors::{HgError, IoResultExt};
20 use hg::lock::LockError;
20 use hg::lock::LockError;
21 use hg::manifest::Manifest;
21 use hg::manifest::Manifest;
22 use hg::matchers::AlwaysMatcher;
22 use hg::matchers::AlwaysMatcher;
23 use hg::repo::Repo;
23 use hg::repo::Repo;
24 use hg::utils::files::get_bytes_from_os_string;
24 use hg::utils::files::get_bytes_from_os_string;
25 use hg::utils::files::get_bytes_from_path;
25 use hg::utils::files::get_bytes_from_path;
26 use hg::utils::files::get_path_from_bytes;
26 use hg::utils::files::get_path_from_bytes;
27 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
27 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
28 use hg::StatusOptions;
28 use hg::StatusOptions;
29 use log::info;
29 use log::info;
30 use std::io;
30 use std::io;
31 use std::path::PathBuf;
31 use std::path::PathBuf;
32
32
33 pub const HELP_TEXT: &str = "
33 pub const HELP_TEXT: &str = "
34 Show changed files in the working directory
34 Show changed files in the working directory
35
35
36 This is a pure Rust version of `hg status`.
36 This is a pure Rust version of `hg status`.
37
37
38 Some options might be missing, check the list below.
38 Some options might be missing, check the list below.
39 ";
39 ";
40
40
41 pub fn args() -> clap::App<'static, 'static> {
41 pub fn args() -> clap::App<'static, 'static> {
42 SubCommand::with_name("status")
42 SubCommand::with_name("status")
43 .alias("st")
43 .alias("st")
44 .about(HELP_TEXT)
44 .about(HELP_TEXT)
45 .arg(
45 .arg(
46 Arg::with_name("all")
46 Arg::with_name("all")
47 .help("show status of all files")
47 .help("show status of all files")
48 .short("-A")
48 .short("-A")
49 .long("--all"),
49 .long("--all"),
50 )
50 )
51 .arg(
51 .arg(
52 Arg::with_name("modified")
52 Arg::with_name("modified")
53 .help("show only modified files")
53 .help("show only modified files")
54 .short("-m")
54 .short("-m")
55 .long("--modified"),
55 .long("--modified"),
56 )
56 )
57 .arg(
57 .arg(
58 Arg::with_name("added")
58 Arg::with_name("added")
59 .help("show only added files")
59 .help("show only added files")
60 .short("-a")
60 .short("-a")
61 .long("--added"),
61 .long("--added"),
62 )
62 )
63 .arg(
63 .arg(
64 Arg::with_name("removed")
64 Arg::with_name("removed")
65 .help("show only removed files")
65 .help("show only removed files")
66 .short("-r")
66 .short("-r")
67 .long("--removed"),
67 .long("--removed"),
68 )
68 )
69 .arg(
69 .arg(
70 Arg::with_name("clean")
70 Arg::with_name("clean")
71 .help("show only clean files")
71 .help("show only clean files")
72 .short("-c")
72 .short("-c")
73 .long("--clean"),
73 .long("--clean"),
74 )
74 )
75 .arg(
75 .arg(
76 Arg::with_name("deleted")
76 Arg::with_name("deleted")
77 .help("show only deleted files")
77 .help("show only deleted files")
78 .short("-d")
78 .short("-d")
79 .long("--deleted"),
79 .long("--deleted"),
80 )
80 )
81 .arg(
81 .arg(
82 Arg::with_name("unknown")
82 Arg::with_name("unknown")
83 .help("show only unknown (not tracked) files")
83 .help("show only unknown (not tracked) files")
84 .short("-u")
84 .short("-u")
85 .long("--unknown"),
85 .long("--unknown"),
86 )
86 )
87 .arg(
87 .arg(
88 Arg::with_name("ignored")
88 Arg::with_name("ignored")
89 .help("show only ignored files")
89 .help("show only ignored files")
90 .short("-i")
90 .short("-i")
91 .long("--ignored"),
91 .long("--ignored"),
92 )
92 )
93 .arg(
93 .arg(
94 Arg::with_name("copies")
94 Arg::with_name("copies")
95 .help("show source of copied files (DEFAULT: ui.statuscopies)")
95 .help("show source of copied files (DEFAULT: ui.statuscopies)")
96 .short("-C")
96 .short("-C")
97 .long("--copies"),
97 .long("--copies"),
98 )
98 )
99 .arg(
99 .arg(
100 Arg::with_name("no-status")
100 Arg::with_name("no-status")
101 .help("hide status prefix")
101 .help("hide status prefix")
102 .short("-n")
102 .short("-n")
103 .long("--no-status"),
103 .long("--no-status"),
104 )
104 )
105 }
105 }
106
106
107 /// Pure data type allowing the caller to specify file states to display
107 /// Pure data type allowing the caller to specify file states to display
108 #[derive(Copy, Clone, Debug)]
108 #[derive(Copy, Clone, Debug)]
109 pub struct DisplayStates {
109 pub struct DisplayStates {
110 pub modified: bool,
110 pub modified: bool,
111 pub added: bool,
111 pub added: bool,
112 pub removed: bool,
112 pub removed: bool,
113 pub clean: bool,
113 pub clean: bool,
114 pub deleted: bool,
114 pub deleted: bool,
115 pub unknown: bool,
115 pub unknown: bool,
116 pub ignored: bool,
116 pub ignored: bool,
117 }
117 }
118
118
119 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
119 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
120 modified: true,
120 modified: true,
121 added: true,
121 added: true,
122 removed: true,
122 removed: true,
123 clean: false,
123 clean: false,
124 deleted: true,
124 deleted: true,
125 unknown: true,
125 unknown: true,
126 ignored: false,
126 ignored: false,
127 };
127 };
128
128
129 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
129 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
130 modified: true,
130 modified: true,
131 added: true,
131 added: true,
132 removed: true,
132 removed: true,
133 clean: true,
133 clean: true,
134 deleted: true,
134 deleted: true,
135 unknown: true,
135 unknown: true,
136 ignored: true,
136 ignored: true,
137 };
137 };
138
138
139 impl DisplayStates {
139 impl DisplayStates {
140 pub fn is_empty(&self) -> bool {
140 pub fn is_empty(&self) -> bool {
141 !(self.modified
141 !(self.modified
142 || self.added
142 || self.added
143 || self.removed
143 || self.removed
144 || self.clean
144 || self.clean
145 || self.deleted
145 || self.deleted
146 || self.unknown
146 || self.unknown
147 || self.ignored)
147 || self.ignored)
148 }
148 }
149 }
149 }
150
150
151 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
151 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
152 let status_enabled_default = false;
153 let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
154 if !status_enabled.unwrap_or(status_enabled_default) {
155 return Err(CommandError::unsupported(
156 "status is experimental in rhg (enable it with 'rhg.status = true' \
157 or enable fallback with 'rhg.on-unsupported = fallback')"
158 ));
159 }
160
161 // TODO: lift these limitations
152 // TODO: lift these limitations
162 if invocation.config.get_bool(b"ui", b"tweakdefaults")? {
153 if invocation.config.get_bool(b"ui", b"tweakdefaults")? {
163 return Err(CommandError::unsupported(
154 return Err(CommandError::unsupported(
164 "ui.tweakdefaults is not yet supported with rhg status",
155 "ui.tweakdefaults is not yet supported with rhg status",
165 ));
156 ));
166 }
157 }
167 if invocation.config.get_bool(b"ui", b"statuscopies")? {
158 if invocation.config.get_bool(b"ui", b"statuscopies")? {
168 return Err(CommandError::unsupported(
159 return Err(CommandError::unsupported(
169 "ui.statuscopies is not yet supported with rhg status",
160 "ui.statuscopies is not yet supported with rhg status",
170 ));
161 ));
171 }
162 }
172 if invocation
163 if invocation
173 .config
164 .config
174 .get(b"commands", b"status.terse")
165 .get(b"commands", b"status.terse")
175 .is_some()
166 .is_some()
176 {
167 {
177 return Err(CommandError::unsupported(
168 return Err(CommandError::unsupported(
178 "status.terse is not yet supported with rhg status",
169 "status.terse is not yet supported with rhg status",
179 ));
170 ));
180 }
171 }
181
172
182 let ui = invocation.ui;
173 let ui = invocation.ui;
183 let config = invocation.config;
174 let config = invocation.config;
184 let args = invocation.subcommand_args;
175 let args = invocation.subcommand_args;
185
176
186 let verbose = !ui.plain(None)
177 let verbose = !ui.plain(None)
187 && !args.is_present("print0")
178 && !args.is_present("print0")
188 && (config.get_bool(b"ui", b"verbose")?
179 && (config.get_bool(b"ui", b"verbose")?
189 || config.get_bool(b"commands", b"status.verbose")?);
180 || config.get_bool(b"commands", b"status.verbose")?);
190 if verbose {
181 if verbose {
191 return Err(CommandError::unsupported(
182 return Err(CommandError::unsupported(
192 "verbose status is not supported yet",
183 "verbose status is not supported yet",
193 ));
184 ));
194 }
185 }
195
186
196 let all = args.is_present("all");
187 let all = args.is_present("all");
197 let display_states = if all {
188 let display_states = if all {
198 // TODO when implementing `--quiet`: it excludes clean files
189 // TODO when implementing `--quiet`: it excludes clean files
199 // from `--all`
190 // from `--all`
200 ALL_DISPLAY_STATES
191 ALL_DISPLAY_STATES
201 } else {
192 } else {
202 let requested = DisplayStates {
193 let requested = DisplayStates {
203 modified: args.is_present("modified"),
194 modified: args.is_present("modified"),
204 added: args.is_present("added"),
195 added: args.is_present("added"),
205 removed: args.is_present("removed"),
196 removed: args.is_present("removed"),
206 clean: args.is_present("clean"),
197 clean: args.is_present("clean"),
207 deleted: args.is_present("deleted"),
198 deleted: args.is_present("deleted"),
208 unknown: args.is_present("unknown"),
199 unknown: args.is_present("unknown"),
209 ignored: args.is_present("ignored"),
200 ignored: args.is_present("ignored"),
210 };
201 };
211 if requested.is_empty() {
202 if requested.is_empty() {
212 DEFAULT_DISPLAY_STATES
203 DEFAULT_DISPLAY_STATES
213 } else {
204 } else {
214 requested
205 requested
215 }
206 }
216 };
207 };
217 let no_status = args.is_present("no-status");
208 let no_status = args.is_present("no-status");
218 let list_copies = all
209 let list_copies = all
219 || args.is_present("copies")
210 || args.is_present("copies")
220 || config.get_bool(b"ui", b"statuscopies")?;
211 || config.get_bool(b"ui", b"statuscopies")?;
221
212
222 let repo = invocation.repo?;
213 let repo = invocation.repo?;
223
214
224 if repo.has_sparse() || repo.has_narrow() {
215 if repo.has_sparse() || repo.has_narrow() {
225 return Err(CommandError::unsupported(
216 return Err(CommandError::unsupported(
226 "rhg status is not supported for sparse checkouts or narrow clones yet"
217 "rhg status is not supported for sparse checkouts or narrow clones yet"
227 ));
218 ));
228 }
219 }
229
220
230 let mut dmap = repo.dirstate_map_mut()?;
221 let mut dmap = repo.dirstate_map_mut()?;
231
222
232 let options = StatusOptions {
223 let options = StatusOptions {
233 // we're currently supporting file systems with exec flags only
224 // we're currently supporting file systems with exec flags only
234 // anyway
225 // anyway
235 check_exec: true,
226 check_exec: true,
236 list_clean: display_states.clean,
227 list_clean: display_states.clean,
237 list_unknown: display_states.unknown,
228 list_unknown: display_states.unknown,
238 list_ignored: display_states.ignored,
229 list_ignored: display_states.ignored,
239 list_copies,
230 list_copies,
240 collect_traversed_dirs: false,
231 collect_traversed_dirs: false,
241 };
232 };
242 let (mut ds_status, pattern_warnings) = dmap.status(
233 let (mut ds_status, pattern_warnings) = dmap.status(
243 &AlwaysMatcher,
234 &AlwaysMatcher,
244 repo.working_directory_path().to_owned(),
235 repo.working_directory_path().to_owned(),
245 ignore_files(repo, config),
236 ignore_files(repo, config),
246 options,
237 options,
247 )?;
238 )?;
248 for warning in pattern_warnings {
239 for warning in pattern_warnings {
249 match warning {
240 match warning {
250 hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui
241 hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui
251 .write_stderr(&format_bytes!(
242 .write_stderr(&format_bytes!(
252 b"{}: ignoring invalid syntax '{}'\n",
243 b"{}: ignoring invalid syntax '{}'\n",
253 get_bytes_from_path(path),
244 get_bytes_from_path(path),
254 &*syntax
245 &*syntax
255 ))?,
246 ))?,
256 hg::PatternFileWarning::NoSuchFile(path) => {
247 hg::PatternFileWarning::NoSuchFile(path) => {
257 let path = if let Ok(relative) =
248 let path = if let Ok(relative) =
258 path.strip_prefix(repo.working_directory_path())
249 path.strip_prefix(repo.working_directory_path())
259 {
250 {
260 relative
251 relative
261 } else {
252 } else {
262 &*path
253 &*path
263 };
254 };
264 ui.write_stderr(&format_bytes!(
255 ui.write_stderr(&format_bytes!(
265 b"skipping unreadable pattern file '{}': \
256 b"skipping unreadable pattern file '{}': \
266 No such file or directory\n",
257 No such file or directory\n",
267 get_bytes_from_path(path),
258 get_bytes_from_path(path),
268 ))?
259 ))?
269 }
260 }
270 }
261 }
271 }
262 }
272
263
273 for (path, error) in ds_status.bad {
264 for (path, error) in ds_status.bad {
274 let error = match error {
265 let error = match error {
275 hg::BadMatch::OsError(code) => {
266 hg::BadMatch::OsError(code) => {
276 std::io::Error::from_raw_os_error(code).to_string()
267 std::io::Error::from_raw_os_error(code).to_string()
277 }
268 }
278 hg::BadMatch::BadType(ty) => {
269 hg::BadMatch::BadType(ty) => {
279 format!("unsupported file type (type is {})", ty)
270 format!("unsupported file type (type is {})", ty)
280 }
271 }
281 };
272 };
282 ui.write_stderr(&format_bytes!(
273 ui.write_stderr(&format_bytes!(
283 b"{}: {}\n",
274 b"{}: {}\n",
284 path.as_bytes(),
275 path.as_bytes(),
285 error.as_bytes()
276 error.as_bytes()
286 ))?
277 ))?
287 }
278 }
288 if !ds_status.unsure.is_empty() {
279 if !ds_status.unsure.is_empty() {
289 info!(
280 info!(
290 "Files to be rechecked by retrieval from filelog: {:?}",
281 "Files to be rechecked by retrieval from filelog: {:?}",
291 ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
282 ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
292 );
283 );
293 }
284 }
294 let mut fixup = Vec::new();
285 let mut fixup = Vec::new();
295 if !ds_status.unsure.is_empty()
286 if !ds_status.unsure.is_empty()
296 && (display_states.modified || display_states.clean)
287 && (display_states.modified || display_states.clean)
297 {
288 {
298 let p1 = repo.dirstate_parents()?.p1;
289 let p1 = repo.dirstate_parents()?.p1;
299 let manifest = repo.manifest_for_node(p1).map_err(|e| {
290 let manifest = repo.manifest_for_node(p1).map_err(|e| {
300 CommandError::from((e, &*format!("{:x}", p1.short())))
291 CommandError::from((e, &*format!("{:x}", p1.short())))
301 })?;
292 })?;
302 for to_check in ds_status.unsure {
293 for to_check in ds_status.unsure {
303 if unsure_is_modified(repo, &manifest, &to_check.path)? {
294 if unsure_is_modified(repo, &manifest, &to_check.path)? {
304 if display_states.modified {
295 if display_states.modified {
305 ds_status.modified.push(to_check);
296 ds_status.modified.push(to_check);
306 }
297 }
307 } else {
298 } else {
308 if display_states.clean {
299 if display_states.clean {
309 ds_status.clean.push(to_check.clone());
300 ds_status.clean.push(to_check.clone());
310 }
301 }
311 fixup.push(to_check.path.into_owned())
302 fixup.push(to_check.path.into_owned())
312 }
303 }
313 }
304 }
314 }
305 }
315 let relative_paths = (!ui.plain(None))
306 let relative_paths = (!ui.plain(None))
316 && config
307 && config
317 .get_option(b"commands", b"status.relative")?
308 .get_option(b"commands", b"status.relative")?
318 .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
309 .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
319 let output = DisplayStatusPaths {
310 let output = DisplayStatusPaths {
320 ui,
311 ui,
321 no_status,
312 no_status,
322 relativize: if relative_paths {
313 relativize: if relative_paths {
323 Some(RelativizePaths::new(repo)?)
314 Some(RelativizePaths::new(repo)?)
324 } else {
315 } else {
325 None
316 None
326 },
317 },
327 };
318 };
328 if display_states.modified {
319 if display_states.modified {
329 output.display(b"M ", "status.modified", ds_status.modified)?;
320 output.display(b"M ", "status.modified", ds_status.modified)?;
330 }
321 }
331 if display_states.added {
322 if display_states.added {
332 output.display(b"A ", "status.added", ds_status.added)?;
323 output.display(b"A ", "status.added", ds_status.added)?;
333 }
324 }
334 if display_states.removed {
325 if display_states.removed {
335 output.display(b"R ", "status.removed", ds_status.removed)?;
326 output.display(b"R ", "status.removed", ds_status.removed)?;
336 }
327 }
337 if display_states.deleted {
328 if display_states.deleted {
338 output.display(b"! ", "status.deleted", ds_status.deleted)?;
329 output.display(b"! ", "status.deleted", ds_status.deleted)?;
339 }
330 }
340 if display_states.unknown {
331 if display_states.unknown {
341 output.display(b"? ", "status.unknown", ds_status.unknown)?;
332 output.display(b"? ", "status.unknown", ds_status.unknown)?;
342 }
333 }
343 if display_states.ignored {
334 if display_states.ignored {
344 output.display(b"I ", "status.ignored", ds_status.ignored)?;
335 output.display(b"I ", "status.ignored", ds_status.ignored)?;
345 }
336 }
346 if display_states.clean {
337 if display_states.clean {
347 output.display(b"C ", "status.clean", ds_status.clean)?;
338 output.display(b"C ", "status.clean", ds_status.clean)?;
348 }
339 }
349
340
350 let mut dirstate_write_needed = ds_status.dirty;
341 let mut dirstate_write_needed = ds_status.dirty;
351 let filesystem_time_at_status_start =
342 let filesystem_time_at_status_start =
352 ds_status.filesystem_time_at_status_start;
343 ds_status.filesystem_time_at_status_start;
353
344
354 if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
345 if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
355 && !dirstate_write_needed
346 && !dirstate_write_needed
356 {
347 {
357 // Nothing to update
348 // Nothing to update
358 return Ok(());
349 return Ok(());
359 }
350 }
360
351
361 // Update the dirstate on disk if we can
352 // Update the dirstate on disk if we can
362 let with_lock_result =
353 let with_lock_result =
363 repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
354 repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
364 if let Some(mtime_boundary) = filesystem_time_at_status_start {
355 if let Some(mtime_boundary) = filesystem_time_at_status_start {
365 for hg_path in fixup {
356 for hg_path in fixup {
366 use std::os::unix::fs::MetadataExt;
357 use std::os::unix::fs::MetadataExt;
367 let fs_path = hg_path_to_path_buf(&hg_path)
358 let fs_path = hg_path_to_path_buf(&hg_path)
368 .expect("HgPath conversion");
359 .expect("HgPath conversion");
369 // Specifically do not reuse `fs_metadata` from
360 // Specifically do not reuse `fs_metadata` from
370 // `unsure_is_clean` which was needed before reading
361 // `unsure_is_clean` which was needed before reading
371 // contents. Here we access metadata again after reading
362 // contents. Here we access metadata again after reading
372 // content, in case it changed in the meantime.
363 // content, in case it changed in the meantime.
373 let fs_metadata = repo
364 let fs_metadata = repo
374 .working_directory_vfs()
365 .working_directory_vfs()
375 .symlink_metadata(&fs_path)?;
366 .symlink_metadata(&fs_path)?;
376 if let Some(mtime) =
367 if let Some(mtime) =
377 TruncatedTimestamp::for_reliable_mtime_of(
368 TruncatedTimestamp::for_reliable_mtime_of(
378 &fs_metadata,
369 &fs_metadata,
379 &mtime_boundary,
370 &mtime_boundary,
380 )
371 )
381 .when_reading_file(&fs_path)?
372 .when_reading_file(&fs_path)?
382 {
373 {
383 let mode = fs_metadata.mode();
374 let mode = fs_metadata.mode();
384 let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT;
375 let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT;
385 let mut entry = dmap
376 let mut entry = dmap
386 .get(&hg_path)?
377 .get(&hg_path)?
387 .expect("ambiguous file not in dirstate");
378 .expect("ambiguous file not in dirstate");
388 entry.set_clean(mode, size, mtime);
379 entry.set_clean(mode, size, mtime);
389 dmap.add_file(&hg_path, entry)?;
380 dmap.add_file(&hg_path, entry)?;
390 dirstate_write_needed = true
381 dirstate_write_needed = true
391 }
382 }
392 }
383 }
393 }
384 }
394 drop(dmap); // Avoid "already mutably borrowed" RefCell panics
385 drop(dmap); // Avoid "already mutably borrowed" RefCell panics
395 if dirstate_write_needed {
386 if dirstate_write_needed {
396 repo.write_dirstate()?
387 repo.write_dirstate()?
397 }
388 }
398 Ok(())
389 Ok(())
399 });
390 });
400 match with_lock_result {
391 match with_lock_result {
401 Ok(closure_result) => closure_result?,
392 Ok(closure_result) => closure_result?,
402 Err(LockError::AlreadyHeld) => {
393 Err(LockError::AlreadyHeld) => {
403 // Not updating the dirstate is not ideal but not critical:
394 // Not updating the dirstate is not ideal but not critical:
404 // don’t keep our caller waiting until some other Mercurial
395 // don’t keep our caller waiting until some other Mercurial
405 // process releases the lock.
396 // process releases the lock.
406 }
397 }
407 Err(LockError::Other(HgError::IoError { error, .. }))
398 Err(LockError::Other(HgError::IoError { error, .. }))
408 if error.kind() == io::ErrorKind::PermissionDenied =>
399 if error.kind() == io::ErrorKind::PermissionDenied =>
409 {
400 {
410 // `hg status` on a read-only repository is fine
401 // `hg status` on a read-only repository is fine
411 }
402 }
412 Err(LockError::Other(error)) => {
403 Err(LockError::Other(error)) => {
413 // Report other I/O errors
404 // Report other I/O errors
414 Err(error)?
405 Err(error)?
415 }
406 }
416 }
407 }
417 Ok(())
408 Ok(())
418 }
409 }
419
410
420 fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
411 fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
421 let mut ignore_files = Vec::new();
412 let mut ignore_files = Vec::new();
422 let repo_ignore = repo.working_directory_vfs().join(".hgignore");
413 let repo_ignore = repo.working_directory_vfs().join(".hgignore");
423 if repo_ignore.exists() {
414 if repo_ignore.exists() {
424 ignore_files.push(repo_ignore)
415 ignore_files.push(repo_ignore)
425 }
416 }
426 for (key, value) in config.iter_section(b"ui") {
417 for (key, value) in config.iter_section(b"ui") {
427 if key == b"ignore" || key.starts_with(b"ignore.") {
418 if key == b"ignore" || key.starts_with(b"ignore.") {
428 let path = get_path_from_bytes(value);
419 let path = get_path_from_bytes(value);
429 // TODO: expand "~/" and environment variable here, like Python
420 // TODO: expand "~/" and environment variable here, like Python
430 // does with `os.path.expanduser` and `os.path.expandvars`
421 // does with `os.path.expanduser` and `os.path.expandvars`
431
422
432 let joined = repo.working_directory_path().join(path);
423 let joined = repo.working_directory_path().join(path);
433 ignore_files.push(joined);
424 ignore_files.push(joined);
434 }
425 }
435 }
426 }
436 ignore_files
427 ignore_files
437 }
428 }
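// Illustrative sketch, not part of rhg: the kind of "~/" expansion the TODO
// above refers to. A minimal, assumption-laden version: it relies on a
// Unix-style `HOME` variable and only handles a leading "~" (no "~user" and
// no environment variables, unlike Python's `os.path.expanduser` and
// `os.path.expandvars`).
fn expand_user(path: &std::path::Path) -> std::path::PathBuf {
    if let Ok(rest) = path.strip_prefix("~") {
        if let Some(home) = std::env::var_os("HOME") {
            // Prepend the home directory to whatever followed "~/".
            return std::path::PathBuf::from(home).join(rest);
        }
    }
    // Anything else (including "~user/...") is returned unchanged.
    path.to_path_buf()
}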
438
429
439 struct DisplayStatusPaths<'a> {
430 struct DisplayStatusPaths<'a> {
440 ui: &'a Ui,
431 ui: &'a Ui,
441 no_status: bool,
432 no_status: bool,
442 relativize: Option<RelativizePaths>,
433 relativize: Option<RelativizePaths>,
443 }
434 }
444
435
445 impl DisplayStatusPaths<'_> {
436 impl DisplayStatusPaths<'_> {
446 // Probably more elegant to use a Deref or Borrow trait rather than
437 // Probably more elegant to use a Deref or Borrow trait rather than
447 // hardcode HgPathBuf, but probably not really useful at this point
438 // hardcode HgPathBuf, but probably not really useful at this point
448 fn display(
439 fn display(
449 &self,
440 &self,
450 status_prefix: &[u8],
441 status_prefix: &[u8],
451 label: &'static str,
442 label: &'static str,
452 mut paths: Vec<StatusPath<'_>>,
443 mut paths: Vec<StatusPath<'_>>,
453 ) -> Result<(), CommandError> {
444 ) -> Result<(), CommandError> {
454 paths.sort_unstable();
445 paths.sort_unstable();
455 // TODO: get the stdout lock once for the whole loop instead of in each write
446 // TODO: get the stdout lock once for the whole loop instead of in each write
456 for StatusPath { path, copy_source } in paths {
447 for StatusPath { path, copy_source } in paths {
457 let relative;
448 let relative;
458 let path = if let Some(relativize) = &self.relativize {
449 let path = if let Some(relativize) = &self.relativize {
459 relative = relativize.relativize(&path);
450 relative = relativize.relativize(&path);
460 &*relative
451 &*relative
461 } else {
452 } else {
462 path.as_bytes()
453 path.as_bytes()
463 };
454 };
464 // TODO: Add a way to use `write_bytes!` instead of `format_bytes!`
455 // TODO: Add a way to use `write_bytes!` instead of `format_bytes!`
465 // in order to stream to stdout instead of allocating an
456 // in order to stream to stdout instead of allocating an
466 // intermediate `Vec<u8>`.
457 // intermediate `Vec<u8>`.
467 if !self.no_status {
458 if !self.no_status {
468 self.ui.write_stdout_labelled(status_prefix, label)?
459 self.ui.write_stdout_labelled(status_prefix, label)?
469 }
460 }
470 self.ui
461 self.ui
471 .write_stdout_labelled(&format_bytes!(b"{}\n", path), label)?;
462 .write_stdout_labelled(&format_bytes!(b"{}\n", path), label)?;
472 if let Some(source) = copy_source {
463 if let Some(source) = copy_source {
473 let label = "status.copied";
464 let label = "status.copied";
474 self.ui.write_stdout_labelled(
465 self.ui.write_stdout_labelled(
475 &format_bytes!(b" {}\n", source.as_bytes()),
466 &format_bytes!(b" {}\n", source.as_bytes()),
476 label,
467 label,
477 )?
468 )?
478 }
469 }
479 }
470 }
480 Ok(())
471 Ok(())
481 }
472 }
482 }
473 }
483
474
484 /// Check if a file is modified by comparing actual repo store and file system.
475 /// Check if a file is modified by comparing actual repo store and file system.
485 ///
476 ///
486 /// This is meant to be used for files that the dirstate cannot resolve,
477 /// This is meant to be used for files that the dirstate cannot resolve,
487 /// due to timestamp resolution limits.
478 /// due to timestamp resolution limits.
488 fn unsure_is_modified(
479 fn unsure_is_modified(
489 repo: &Repo,
480 repo: &Repo,
490 manifest: &Manifest,
481 manifest: &Manifest,
491 hg_path: &HgPath,
482 hg_path: &HgPath,
492 ) -> Result<bool, HgError> {
483 ) -> Result<bool, HgError> {
493 let vfs = repo.working_directory_vfs();
484 let vfs = repo.working_directory_vfs();
494 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
485 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
495 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
486 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
496 let is_symlink = fs_metadata.file_type().is_symlink();
487 let is_symlink = fs_metadata.file_type().is_symlink();
497 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
488 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
498 // dirstate
489 // dirstate
499 let fs_flags = if is_symlink {
490 let fs_flags = if is_symlink {
500 Some(b'l')
491 Some(b'l')
501 } else if has_exec_bit(&fs_metadata) {
492 } else if has_exec_bit(&fs_metadata) {
502 Some(b'x')
493 Some(b'x')
503 } else {
494 } else {
504 None
495 None
505 };
496 };
506
497
507 let entry = manifest
498 let entry = manifest
508 .find_by_path(hg_path)?
499 .find_by_path(hg_path)?
509 .expect("ambiguous file not in p1");
500 .expect("ambiguous file not in p1");
510 if entry.flags != fs_flags {
501 if entry.flags != fs_flags {
511 return Ok(true);
502 return Ok(true);
512 }
503 }
513 let filelog = repo.filelog(hg_path)?;
504 let filelog = repo.filelog(hg_path)?;
514 let fs_len = fs_metadata.len();
505 let fs_len = fs_metadata.len();
515 let filelog_entry =
506 let filelog_entry =
516 filelog.entry_for_node(entry.node_id()?).map_err(|_| {
507 filelog.entry_for_node(entry.node_id()?).map_err(|_| {
517 HgError::corrupted("filelog missing node from manifest")
508 HgError::corrupted("filelog missing node from manifest")
518 })?;
509 })?;
519 if filelog_entry.file_data_len_not_equal_to(fs_len) {
510 if filelog_entry.file_data_len_not_equal_to(fs_len) {
520 // No need to read file contents:
511 // No need to read file contents:
521 // it cannot be equal if it has a different length.
512 // it cannot be equal if it has a different length.
522 return Ok(true);
513 return Ok(true);
523 }
514 }
524
515
525 let p1_filelog_data = filelog_entry.data()?;
516 let p1_filelog_data = filelog_entry.data()?;
526 let p1_contents = p1_filelog_data.file_data()?;
517 let p1_contents = p1_filelog_data.file_data()?;
527 if p1_contents.len() as u64 != fs_len {
518 if p1_contents.len() as u64 != fs_len {
528 // No need to read file contents:
519 // No need to read file contents:
529 // it cannot be equal if it has a different length.
520 // it cannot be equal if it has a different length.
530 return Ok(true);
521 return Ok(true);
531 }
522 }
532
523
533 let fs_contents = if is_symlink {
524 let fs_contents = if is_symlink {
534 get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
525 get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
535 } else {
526 } else {
536 vfs.read(fs_path)?
527 vfs.read(fs_path)?
537 };
528 };
538 Ok(p1_contents != &*fs_contents)
529 Ok(p1_contents != &*fs_contents)
539 }
530 }
@@ -1,4073 +1,4072
1 #!/usr/bin/env python3
1 #!/usr/bin/env python3
2 #
2 #
3 # run-tests.py - Run a set of tests on Mercurial
3 # run-tests.py - Run a set of tests on Mercurial
4 #
4 #
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Olivia Mackall <olivia@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 # Modifying this script is tricky because it has many modes:
10 # Modifying this script is tricky because it has many modes:
11 # - serial (default) vs parallel (-jN, N > 1)
11 # - serial (default) vs parallel (-jN, N > 1)
12 # - no coverage (default) vs coverage (-c, -C, -s)
12 # - no coverage (default) vs coverage (-c, -C, -s)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
13 # - temp install (default) vs specific hg script (--with-hg, --local)
14 # - tests are a mix of shell scripts and Python scripts
14 # - tests are a mix of shell scripts and Python scripts
15 #
15 #
16 # If you change this script, it is recommended that you ensure you
16 # If you change this script, it is recommended that you ensure you
17 # haven't broken it by running it in various modes with a representative
17 # haven't broken it by running it in various modes with a representative
18 # sample of test scripts. For example:
18 # sample of test scripts. For example:
19 #
19 #
20 # 1) serial, no coverage, temp install:
20 # 1) serial, no coverage, temp install:
21 # ./run-tests.py test-s*
21 # ./run-tests.py test-s*
22 # 2) serial, no coverage, local hg:
22 # 2) serial, no coverage, local hg:
23 # ./run-tests.py --local test-s*
23 # ./run-tests.py --local test-s*
24 # 3) serial, coverage, temp install:
24 # 3) serial, coverage, temp install:
25 # ./run-tests.py -c test-s*
25 # ./run-tests.py -c test-s*
26 # 4) serial, coverage, local hg:
26 # 4) serial, coverage, local hg:
27 # ./run-tests.py -c --local test-s* # unsupported
27 # ./run-tests.py -c --local test-s* # unsupported
28 # 5) parallel, no coverage, temp install:
28 # 5) parallel, no coverage, temp install:
29 # ./run-tests.py -j2 test-s*
29 # ./run-tests.py -j2 test-s*
30 # 6) parallel, no coverage, local hg:
30 # 6) parallel, no coverage, local hg:
31 # ./run-tests.py -j2 --local test-s*
31 # ./run-tests.py -j2 --local test-s*
32 # 7) parallel, coverage, temp install:
32 # 7) parallel, coverage, temp install:
33 # ./run-tests.py -j2 -c test-s* # currently broken
33 # ./run-tests.py -j2 -c test-s* # currently broken
34 # 8) parallel, coverage, local install:
34 # 8) parallel, coverage, local install:
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
35 # ./run-tests.py -j2 -c --local test-s* # unsupported (and broken)
36 # 9) parallel, custom tmp dir:
36 # 9) parallel, custom tmp dir:
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
37 # ./run-tests.py -j2 --tmpdir /tmp/myhgtests
38 # 10) parallel, pure, tests that call run-tests:
38 # 10) parallel, pure, tests that call run-tests:
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
39 # ./run-tests.py --pure `grep -l run-tests.py *.t`
40 #
40 #
41 # (You could use any subset of the tests: test-s* happens to match
41 # (You could use any subset of the tests: test-s* happens to match
42 # enough that it's worth doing parallel runs, few enough that it
42 # enough that it's worth doing parallel runs, few enough that it
43 # completes fairly quickly, includes both shell and Python scripts, and
43 # completes fairly quickly, includes both shell and Python scripts, and
44 # includes some scripts that run daemon processes.)
44 # includes some scripts that run daemon processes.)
45
45
46 from __future__ import absolute_import, print_function
46 from __future__ import absolute_import, print_function
47
47
48 import argparse
48 import argparse
49 import collections
49 import collections
50 import contextlib
50 import contextlib
51 import difflib
51 import difflib
52 import distutils.version as version
52 import distutils.version as version
53 import errno
53 import errno
54 import json
54 import json
55 import multiprocessing
55 import multiprocessing
56 import os
56 import os
57 import platform
57 import platform
58 import random
58 import random
59 import re
59 import re
60 import shutil
60 import shutil
61 import signal
61 import signal
62 import socket
62 import socket
63 import subprocess
63 import subprocess
64 import sys
64 import sys
65 import sysconfig
65 import sysconfig
66 import tempfile
66 import tempfile
67 import threading
67 import threading
68 import time
68 import time
69 import unittest
69 import unittest
70 import uuid
70 import uuid
71 import xml.dom.minidom as minidom
71 import xml.dom.minidom as minidom
72
72
73 WINDOWS = os.name == r'nt'
73 WINDOWS = os.name == r'nt'
74
74
75 try:
75 try:
76 import Queue as queue
76 import Queue as queue
77 except ImportError:
77 except ImportError:
78 import queue
78 import queue
79
79
80 try:
80 try:
81 import shlex
81 import shlex
82
82
83 shellquote = shlex.quote
83 shellquote = shlex.quote
84 except (ImportError, AttributeError):
84 except (ImportError, AttributeError):
85 import pipes
85 import pipes
86
86
87 shellquote = pipes.quote
87 shellquote = pipes.quote
88
88
89
89
90 processlock = threading.Lock()
90 processlock = threading.Lock()
91
91
92 pygmentspresent = False
92 pygmentspresent = False
93 try: # is pygments installed
93 try: # is pygments installed
94 import pygments
94 import pygments
95 import pygments.lexers as lexers
95 import pygments.lexers as lexers
96 import pygments.lexer as lexer
96 import pygments.lexer as lexer
97 import pygments.formatters as formatters
97 import pygments.formatters as formatters
98 import pygments.token as token
98 import pygments.token as token
99 import pygments.style as style
99 import pygments.style as style
100
100
101 if WINDOWS:
101 if WINDOWS:
102 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
102 hgpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
103 sys.path.append(hgpath)
103 sys.path.append(hgpath)
104 try:
104 try:
105 from mercurial import win32 # pytype: disable=import-error
105 from mercurial import win32 # pytype: disable=import-error
106
106
107 # Don't check the result code because it fails on heptapod, but
107 # Don't check the result code because it fails on heptapod, but
108 # something is able to convert to color anyway.
108 # something is able to convert to color anyway.
109 win32.enablevtmode()
109 win32.enablevtmode()
110 finally:
110 finally:
111 sys.path = sys.path[:-1]
111 sys.path = sys.path[:-1]
112
112
113 pygmentspresent = True
113 pygmentspresent = True
114 difflexer = lexers.DiffLexer()
114 difflexer = lexers.DiffLexer()
115 terminal256formatter = formatters.Terminal256Formatter()
115 terminal256formatter = formatters.Terminal256Formatter()
116 except ImportError:
116 except ImportError:
117 pass
117 pass
118
118
119 if pygmentspresent:
119 if pygmentspresent:
120
120
121 class TestRunnerStyle(style.Style):
121 class TestRunnerStyle(style.Style):
122 default_style = ""
122 default_style = ""
123 skipped = token.string_to_tokentype("Token.Generic.Skipped")
123 skipped = token.string_to_tokentype("Token.Generic.Skipped")
124 failed = token.string_to_tokentype("Token.Generic.Failed")
124 failed = token.string_to_tokentype("Token.Generic.Failed")
125 skippedname = token.string_to_tokentype("Token.Generic.SName")
125 skippedname = token.string_to_tokentype("Token.Generic.SName")
126 failedname = token.string_to_tokentype("Token.Generic.FName")
126 failedname = token.string_to_tokentype("Token.Generic.FName")
127 styles = {
127 styles = {
128 skipped: '#e5e5e5',
128 skipped: '#e5e5e5',
129 skippedname: '#00ffff',
129 skippedname: '#00ffff',
130 failed: '#7f0000',
130 failed: '#7f0000',
131 failedname: '#ff0000',
131 failedname: '#ff0000',
132 }
132 }
133
133
134 class TestRunnerLexer(lexer.RegexLexer):
134 class TestRunnerLexer(lexer.RegexLexer):
135 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
135 testpattern = r'[\w-]+\.(t|py)(#[a-zA-Z0-9_\-\.]+)?'
136 tokens = {
136 tokens = {
137 'root': [
137 'root': [
138 (r'^Skipped', token.Generic.Skipped, 'skipped'),
138 (r'^Skipped', token.Generic.Skipped, 'skipped'),
139 (r'^Failed ', token.Generic.Failed, 'failed'),
139 (r'^Failed ', token.Generic.Failed, 'failed'),
140 (r'^ERROR: ', token.Generic.Failed, 'failed'),
140 (r'^ERROR: ', token.Generic.Failed, 'failed'),
141 ],
141 ],
142 'skipped': [
142 'skipped': [
143 (testpattern, token.Generic.SName),
143 (testpattern, token.Generic.SName),
144 (r':.*', token.Generic.Skipped),
144 (r':.*', token.Generic.Skipped),
145 ],
145 ],
146 'failed': [
146 'failed': [
147 (testpattern, token.Generic.FName),
147 (testpattern, token.Generic.FName),
148 (r'(:| ).*', token.Generic.Failed),
148 (r'(:| ).*', token.Generic.Failed),
149 ],
149 ],
150 }
150 }
151
151
152 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
152 runnerformatter = formatters.Terminal256Formatter(style=TestRunnerStyle)
153 runnerlexer = TestRunnerLexer()
153 runnerlexer = TestRunnerLexer()
154
154
155 origenviron = os.environ.copy()
155 origenviron = os.environ.copy()
156
156
157
157
158 if sys.version_info > (3, 5, 0):
158 if sys.version_info > (3, 5, 0):
159 PYTHON3 = True
159 PYTHON3 = True
160 xrange = range # we use xrange in one place, and we'd rather not use range
160 xrange = range # we use xrange in one place, and we'd rather not use range
161
161
162 def _sys2bytes(p):
162 def _sys2bytes(p):
163 if p is None:
163 if p is None:
164 return p
164 return p
165 return p.encode('utf-8')
165 return p.encode('utf-8')
166
166
167 def _bytes2sys(p):
167 def _bytes2sys(p):
168 if p is None:
168 if p is None:
169 return p
169 return p
170 return p.decode('utf-8')
170 return p.decode('utf-8')
171
171
172 osenvironb = getattr(os, 'environb', None)
172 osenvironb = getattr(os, 'environb', None)
173 if osenvironb is None:
173 if osenvironb is None:
174 # Windows lacks os.environb, for instance. A proxy over the real thing
174 # Windows lacks os.environb, for instance. A proxy over the real thing
175 # instead of a copy allows the environment to be updated via bytes on
175 # instead of a copy allows the environment to be updated via bytes on
176 # all platforms.
176 # all platforms.
177 class environbytes(object):
177 class environbytes(object):
178 def __init__(self, strenv):
178 def __init__(self, strenv):
179 self.__len__ = strenv.__len__
179 self.__len__ = strenv.__len__
180 self.clear = strenv.clear
180 self.clear = strenv.clear
181 self._strenv = strenv
181 self._strenv = strenv
182
182
183 def __getitem__(self, k):
183 def __getitem__(self, k):
184 v = self._strenv.__getitem__(_bytes2sys(k))
184 v = self._strenv.__getitem__(_bytes2sys(k))
185 return _sys2bytes(v)
185 return _sys2bytes(v)
186
186
187 def __setitem__(self, k, v):
187 def __setitem__(self, k, v):
188 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
188 self._strenv.__setitem__(_bytes2sys(k), _bytes2sys(v))
189
189
190 def __delitem__(self, k):
190 def __delitem__(self, k):
191 self._strenv.__delitem__(_bytes2sys(k))
191 self._strenv.__delitem__(_bytes2sys(k))
192
192
193 def __contains__(self, k):
193 def __contains__(self, k):
194 return self._strenv.__contains__(_bytes2sys(k))
194 return self._strenv.__contains__(_bytes2sys(k))
195
195
196 def __iter__(self):
196 def __iter__(self):
197 return iter([_sys2bytes(k) for k in iter(self._strenv)])
197 return iter([_sys2bytes(k) for k in iter(self._strenv)])
198
198
199 def get(self, k, default=None):
199 def get(self, k, default=None):
200 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
200 v = self._strenv.get(_bytes2sys(k), _bytes2sys(default))
201 return _sys2bytes(v)
201 return _sys2bytes(v)
202
202
203 def pop(self, k, default=None):
203 def pop(self, k, default=None):
204 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
204 v = self._strenv.pop(_bytes2sys(k), _bytes2sys(default))
205 return _sys2bytes(v)
205 return _sys2bytes(v)
206
206
207 osenvironb = environbytes(os.environ)
207 osenvironb = environbytes(os.environ)
208
208
209 getcwdb = getattr(os, 'getcwdb')
209 getcwdb = getattr(os, 'getcwdb')
210 if not getcwdb or WINDOWS:
210 if not getcwdb or WINDOWS:
211 getcwdb = lambda: _sys2bytes(os.getcwd())
211 getcwdb = lambda: _sys2bytes(os.getcwd())
212
212
213 elif sys.version_info >= (3, 0, 0):
213 elif sys.version_info >= (3, 0, 0):
214 print(
214 print(
215 '%s is only supported on Python 3.5+ and 2.7, not %s'
215 '%s is only supported on Python 3.5+ and 2.7, not %s'
216 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
216 % (sys.argv[0], '.'.join(str(v) for v in sys.version_info[:3]))
217 )
217 )
218 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
218 sys.exit(70) # EX_SOFTWARE from `man 3 sysexits`
219 else:
219 else:
220 PYTHON3 = False
220 PYTHON3 = False
221
221
222 # In python 2.x, path operations are generally done using
222 # In python 2.x, path operations are generally done using
223 # bytestrings by default, so we don't have to do any extra
223 # bytestrings by default, so we don't have to do any extra
224 # fiddling there. We define the wrapper functions anyway just to
224 # fiddling there. We define the wrapper functions anyway just to
225 # help keep code consistent between platforms.
225 # help keep code consistent between platforms.
226 def _sys2bytes(p):
226 def _sys2bytes(p):
227 return p
227 return p
228
228
229 _bytes2sys = _sys2bytes
229 _bytes2sys = _sys2bytes
230 osenvironb = os.environ
230 osenvironb = os.environ
231 getcwdb = os.getcwd
231 getcwdb = os.getcwd
232
232
233 if WINDOWS:
233 if WINDOWS:
234 _getcwdb = getcwdb
234 _getcwdb = getcwdb
235
235
236 def getcwdb():
236 def getcwdb():
237 cwd = _getcwdb()
237 cwd = _getcwdb()
238 if re.match(b'^[a-z]:', cwd):
238 if re.match(b'^[a-z]:', cwd):
239 # os.getcwd() is inconsistent on the capitalization of the drive
239 # os.getcwd() is inconsistent on the capitalization of the drive
240 # letter, so adjust it. see https://bugs.python.org/issue40368
240 # letter, so adjust it. see https://bugs.python.org/issue40368
241 cwd = cwd[0:1].upper() + cwd[1:]
241 cwd = cwd[0:1].upper() + cwd[1:]
242 return cwd
242 return cwd
243
243
244
244
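# Illustrative note (not part of run-tests.py): whichever branch above was
# taken -- the real os.environb or the environbytes proxy -- byte-keyed writes
# go to the same underlying environment, so the str view stays in sync, e.g.
#
#     osenvironb[b'HGTEST_JOBS'] = b'4'
#     assert os.environ['HGTEST_JOBS'] == '4'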
245 # For Windows support
245 # For Windows support
246 wifexited = getattr(os, "WIFEXITED", lambda x: False)
246 wifexited = getattr(os, "WIFEXITED", lambda x: False)
247
247
248 # Whether to use IPv6
248 # Whether to use IPv6
249 def checksocketfamily(name, port=20058):
249 def checksocketfamily(name, port=20058):
250 """return true if we can listen on localhost using family=name
250 """return true if we can listen on localhost using family=name
251
251
252 name should be either 'AF_INET', or 'AF_INET6'.
252 name should be either 'AF_INET', or 'AF_INET6'.
253 port being used is okay - EADDRINUSE is considered successful.
253 port being used is okay - EADDRINUSE is considered successful.
254 """
254 """
255 family = getattr(socket, name, None)
255 family = getattr(socket, name, None)
256 if family is None:
256 if family is None:
257 return False
257 return False
258 try:
258 try:
259 s = socket.socket(family, socket.SOCK_STREAM)
259 s = socket.socket(family, socket.SOCK_STREAM)
260 s.bind(('localhost', port))
260 s.bind(('localhost', port))
261 s.close()
261 s.close()
262 return True
262 return True
263 except socket.error as exc:
263 except socket.error as exc:
264 if exc.errno == errno.EADDRINUSE:
264 if exc.errno == errno.EADDRINUSE:
265 return True
265 return True
266 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
266 elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
267 return False
267 return False
268 else:
268 else:
269 raise
269 raise
270 else:
270 else:
271 return False
271 return False
272
272
273
273
274 # useipv6 will be set by parseargs
274 # useipv6 will be set by parseargs
275 useipv6 = None
275 useipv6 = None
276
276
277
277
278 def checkportisavailable(port):
278 def checkportisavailable(port):
279 """return true if a port seems free to bind on localhost"""
279 """return true if a port seems free to bind on localhost"""
280 if useipv6:
280 if useipv6:
281 family = socket.AF_INET6
281 family = socket.AF_INET6
282 else:
282 else:
283 family = socket.AF_INET
283 family = socket.AF_INET
284 try:
284 try:
285 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
285 with contextlib.closing(socket.socket(family, socket.SOCK_STREAM)) as s:
286 s.bind(('localhost', port))
286 s.bind(('localhost', port))
287 return True
287 return True
288 except socket.error as exc:
288 except socket.error as exc:
289 if WINDOWS and exc.errno == errno.WSAEACCES:
289 if WINDOWS and exc.errno == errno.WSAEACCES:
290 return False
290 return False
291 elif PYTHON3:
291 elif PYTHON3:
292 # TODO: make a proper exception handler after dropping py2. This
292 # TODO: make a proper exception handler after dropping py2. This
293 # works because socket.error is an alias for OSError on py3,
293 # works because socket.error is an alias for OSError on py3,
294 # which is also the baseclass of PermissionError.
294 # which is also the baseclass of PermissionError.
295 if isinstance(exc, PermissionError):
295 if isinstance(exc, PermissionError):
296 return False
296 return False
297 if exc.errno not in (
297 if exc.errno not in (
298 errno.EADDRINUSE,
298 errno.EADDRINUSE,
299 errno.EADDRNOTAVAIL,
299 errno.EADDRNOTAVAIL,
300 errno.EPROTONOSUPPORT,
300 errno.EPROTONOSUPPORT,
301 ):
301 ):
302 raise
302 raise
303 return False
303 return False
304
304
305
305
306 closefds = os.name == 'posix'
306 closefds = os.name == 'posix'
307
307
308
308
309 def Popen4(cmd, wd, timeout, env=None):
309 def Popen4(cmd, wd, timeout, env=None):
310 processlock.acquire()
310 processlock.acquire()
311 p = subprocess.Popen(
311 p = subprocess.Popen(
312 _bytes2sys(cmd),
312 _bytes2sys(cmd),
313 shell=True,
313 shell=True,
314 bufsize=-1,
314 bufsize=-1,
315 cwd=_bytes2sys(wd),
315 cwd=_bytes2sys(wd),
316 env=env,
316 env=env,
317 close_fds=closefds,
317 close_fds=closefds,
318 stdin=subprocess.PIPE,
318 stdin=subprocess.PIPE,
319 stdout=subprocess.PIPE,
319 stdout=subprocess.PIPE,
320 stderr=subprocess.STDOUT,
320 stderr=subprocess.STDOUT,
321 )
321 )
322 processlock.release()
322 processlock.release()
323
323
324 p.fromchild = p.stdout
324 p.fromchild = p.stdout
325 p.tochild = p.stdin
325 p.tochild = p.stdin
326 p.childerr = p.stderr
326 p.childerr = p.stderr
327
327
328 p.timeout = False
328 p.timeout = False
329 if timeout:
329 if timeout:
330
330
331 def t():
331 def t():
332 start = time.time()
332 start = time.time()
333 while time.time() - start < timeout and p.returncode is None:
333 while time.time() - start < timeout and p.returncode is None:
334 time.sleep(0.1)
334 time.sleep(0.1)
335 p.timeout = True
335 p.timeout = True
336 vlog('# Timeout reached for process %d' % p.pid)
336 vlog('# Timeout reached for process %d' % p.pid)
337 if p.returncode is None:
337 if p.returncode is None:
338 terminate(p)
338 terminate(p)
339
339
340 threading.Thread(target=t).start()
340 threading.Thread(target=t).start()
341
341
342 return p
342 return p
343
343
344
344
345 if sys.executable:
345 if sys.executable:
346 sysexecutable = sys.executable
346 sysexecutable = sys.executable
347 elif os.environ.get('PYTHONEXECUTABLE'):
347 elif os.environ.get('PYTHONEXECUTABLE'):
348 sysexecutable = os.environ['PYTHONEXECUTABLE']
348 sysexecutable = os.environ['PYTHONEXECUTABLE']
349 elif os.environ.get('PYTHON'):
349 elif os.environ.get('PYTHON'):
350 sysexecutable = os.environ['PYTHON']
350 sysexecutable = os.environ['PYTHON']
351 else:
351 else:
352 raise AssertionError('Could not find Python interpreter')
352 raise AssertionError('Could not find Python interpreter')
353
353
354 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
354 PYTHON = _sys2bytes(sysexecutable.replace('\\', '/'))
355 IMPL_PATH = b'PYTHONPATH'
355 IMPL_PATH = b'PYTHONPATH'
356 if 'java' in sys.platform:
356 if 'java' in sys.platform:
357 IMPL_PATH = b'JYTHONPATH'
357 IMPL_PATH = b'JYTHONPATH'
358
358
359 default_defaults = {
359 default_defaults = {
360 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
360 'jobs': ('HGTEST_JOBS', multiprocessing.cpu_count()),
361 'timeout': ('HGTEST_TIMEOUT', 360),
361 'timeout': ('HGTEST_TIMEOUT', 360),
362 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
362 'slowtimeout': ('HGTEST_SLOWTIMEOUT', 1500),
363 'port': ('HGTEST_PORT', 20059),
363 'port': ('HGTEST_PORT', 20059),
364 'shell': ('HGTEST_SHELL', 'sh'),
364 'shell': ('HGTEST_SHELL', 'sh'),
365 }
365 }
366
366
367 defaults = default_defaults.copy()
367 defaults = default_defaults.copy()
368
368
369
369
370 def canonpath(path):
370 def canonpath(path):
371 return os.path.realpath(os.path.expanduser(path))
371 return os.path.realpath(os.path.expanduser(path))
372
372
373
373
374 def which(exe):
374 def which(exe):
375 if PYTHON3:
375 if PYTHON3:
376 # shutil.which only accepts bytes starting with Python 3.8
376 # shutil.which only accepts bytes starting with Python 3.8
377 cmd = _bytes2sys(exe)
377 cmd = _bytes2sys(exe)
378 real_exec = shutil.which(cmd)
378 real_exec = shutil.which(cmd)
379 return _sys2bytes(real_exec)
379 return _sys2bytes(real_exec)
380 else:
380 else:
381 # otherwise, search PATH ourselves
381 # otherwise, search PATH ourselves
382 for p in osenvironb[b'PATH'].split(os.pathsep):
382 for p in osenvironb[b'PATH'].split(os.pathsep):
383 f = os.path.join(p, exe)
383 f = os.path.join(p, exe)
384 if os.path.isfile(f):
384 if os.path.isfile(f):
385 return f
385 return f
386 return None
386 return None
387
387
388
388
389 def parselistfiles(files, listtype, warn=True):
389 def parselistfiles(files, listtype, warn=True):
390 entries = dict()
390 entries = dict()
391 for filename in files:
391 for filename in files:
392 try:
392 try:
393 path = os.path.expanduser(os.path.expandvars(filename))
393 path = os.path.expanduser(os.path.expandvars(filename))
394 f = open(path, "rb")
394 f = open(path, "rb")
395 except IOError as err:
395 except IOError as err:
396 if err.errno != errno.ENOENT:
396 if err.errno != errno.ENOENT:
397 raise
397 raise
398 if warn:
398 if warn:
399 print("warning: no such %s file: %s" % (listtype, filename))
399 print("warning: no such %s file: %s" % (listtype, filename))
400 continue
400 continue
401
401
402 for line in f.readlines():
402 for line in f.readlines():
403 line = line.split(b'#', 1)[0].strip()
403 line = line.split(b'#', 1)[0].strip()
404 if line:
404 if line:
405 # Ensure path entries are compatible with os.path.relpath()
405 # Ensure path entries are compatible with os.path.relpath()
406 entries[os.path.normpath(line)] = filename
406 entries[os.path.normpath(line)] = filename
407
407
408 f.close()
408 f.close()
409 return entries
409 return entries
410
410
411
411
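# Illustrative example (hypothetical file contents): blacklist/whitelist files
# parsed above hold one test name per line, '#' starts a comment, and blank
# lines are ignored, e.g.
#
#     test-largefiles.t    # too slow on this builder
#     test-https.t
#
# parselistfiles() then maps each normalized test name to the list file it
# came from.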
412 def parsettestcases(path):
412 def parsettestcases(path):
413 """read a .t test file, return a set of test case names
413 """read a .t test file, return a set of test case names
414
414
415 If path does not exist, return an empty set.
415 If path does not exist, return an empty set.
416 """
416 """
417 cases = []
417 cases = []
418 try:
418 try:
419 with open(path, 'rb') as f:
419 with open(path, 'rb') as f:
420 for l in f:
420 for l in f:
421 if l.startswith(b'#testcases '):
421 if l.startswith(b'#testcases '):
422 cases.append(sorted(l[11:].split()))
422 cases.append(sorted(l[11:].split()))
423 except IOError as ex:
423 except IOError as ex:
424 if ex.errno != errno.ENOENT:
424 if ex.errno != errno.ENOENT:
425 raise
425 raise
426 return cases
426 return cases
427
427
428
428
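# Illustrative example (hypothetical test file): a .t file containing the line
#
#     #testcases flat tree
#
# makes parsettestcases() return [[b'flat', b'tree']]: one sorted list of case
# names per '#testcases' line, or an empty list if the file does not exist.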
429 def getparser():
429 def getparser():
430 """Obtain the OptionParser used by the CLI."""
430 """Obtain the OptionParser used by the CLI."""
431 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
431 parser = argparse.ArgumentParser(usage='%(prog)s [options] [tests]')
432
432
433 selection = parser.add_argument_group('Test Selection')
433 selection = parser.add_argument_group('Test Selection')
434 selection.add_argument(
434 selection.add_argument(
435 '--allow-slow-tests',
435 '--allow-slow-tests',
436 action='store_true',
436 action='store_true',
437 help='allow extremely slow tests',
437 help='allow extremely slow tests',
438 )
438 )
439 selection.add_argument(
439 selection.add_argument(
440 "--blacklist",
440 "--blacklist",
441 action="append",
441 action="append",
442 help="skip tests listed in the specified blacklist file",
442 help="skip tests listed in the specified blacklist file",
443 )
443 )
444 selection.add_argument(
444 selection.add_argument(
445 "--changed",
445 "--changed",
446 help="run tests that are changed in parent rev or working directory",
446 help="run tests that are changed in parent rev or working directory",
447 )
447 )
448 selection.add_argument(
448 selection.add_argument(
449 "-k", "--keywords", help="run tests matching keywords"
449 "-k", "--keywords", help="run tests matching keywords"
450 )
450 )
451 selection.add_argument(
451 selection.add_argument(
452 "-r", "--retest", action="store_true", help="retest failed tests"
452 "-r", "--retest", action="store_true", help="retest failed tests"
453 )
453 )
454 selection.add_argument(
454 selection.add_argument(
455 "--test-list",
455 "--test-list",
456 action="append",
456 action="append",
457 help="read tests to run from the specified file",
457 help="read tests to run from the specified file",
458 )
458 )
459 selection.add_argument(
459 selection.add_argument(
460 "--whitelist",
460 "--whitelist",
461 action="append",
461 action="append",
462 help="always run tests listed in the specified whitelist file",
462 help="always run tests listed in the specified whitelist file",
463 )
463 )
464 selection.add_argument(
464 selection.add_argument(
465 'tests', metavar='TESTS', nargs='*', help='Tests to run'
465 'tests', metavar='TESTS', nargs='*', help='Tests to run'
466 )
466 )
467
467
468 harness = parser.add_argument_group('Test Harness Behavior')
468 harness = parser.add_argument_group('Test Harness Behavior')
469 harness.add_argument(
469 harness.add_argument(
470 '--bisect-repo',
470 '--bisect-repo',
471 metavar='bisect_repo',
471 metavar='bisect_repo',
472 help=(
472 help=(
473 "Path of a repo to bisect. Use together with " "--known-good-rev"
473 "Path of a repo to bisect. Use together with " "--known-good-rev"
474 ),
474 ),
475 )
475 )
476 harness.add_argument(
476 harness.add_argument(
477 "-d",
477 "-d",
478 "--debug",
478 "--debug",
479 action="store_true",
479 action="store_true",
480 help="debug mode: write output of test scripts to console"
480 help="debug mode: write output of test scripts to console"
481 " rather than capturing and diffing it (disables timeout)",
481 " rather than capturing and diffing it (disables timeout)",
482 )
482 )
483 harness.add_argument(
483 harness.add_argument(
484 "-f",
484 "-f",
485 "--first",
485 "--first",
486 action="store_true",
486 action="store_true",
487 help="exit on the first test failure",
487 help="exit on the first test failure",
488 )
488 )
489 harness.add_argument(
489 harness.add_argument(
490 "-i",
490 "-i",
491 "--interactive",
491 "--interactive",
492 action="store_true",
492 action="store_true",
493 help="prompt to accept changed output",
493 help="prompt to accept changed output",
494 )
494 )
495 harness.add_argument(
495 harness.add_argument(
496 "-j",
496 "-j",
497 "--jobs",
497 "--jobs",
498 type=int,
498 type=int,
499 help="number of jobs to run in parallel"
499 help="number of jobs to run in parallel"
500 " (default: $%s or %d)" % defaults['jobs'],
500 " (default: $%s or %d)" % defaults['jobs'],
501 )
501 )
502 harness.add_argument(
502 harness.add_argument(
503 "--keep-tmpdir",
503 "--keep-tmpdir",
504 action="store_true",
504 action="store_true",
505 help="keep temporary directory after running tests",
505 help="keep temporary directory after running tests",
506 )
506 )
507 harness.add_argument(
507 harness.add_argument(
508 '--known-good-rev',
508 '--known-good-rev',
509 metavar="known_good_rev",
509 metavar="known_good_rev",
510 help=(
510 help=(
511 "Automatically bisect any failures using this "
511 "Automatically bisect any failures using this "
512 "revision as a known-good revision."
512 "revision as a known-good revision."
513 ),
513 ),
514 )
514 )
515 harness.add_argument(
515 harness.add_argument(
516 "--list-tests",
516 "--list-tests",
517 action="store_true",
517 action="store_true",
518 help="list tests instead of running them",
518 help="list tests instead of running them",
519 )
519 )
520 harness.add_argument(
520 harness.add_argument(
521 "--loop", action="store_true", help="loop tests repeatedly"
521 "--loop", action="store_true", help="loop tests repeatedly"
522 )
522 )
523 harness.add_argument(
523 harness.add_argument(
524 '--random', action="store_true", help='run tests in random order'
524 '--random', action="store_true", help='run tests in random order'
525 )
525 )
526 harness.add_argument(
526 harness.add_argument(
527 '--order-by-runtime',
527 '--order-by-runtime',
528 action="store_true",
528 action="store_true",
529 help='run slowest tests first, according to .testtimes',
529 help='run slowest tests first, according to .testtimes',
530 )
530 )
531 harness.add_argument(
531 harness.add_argument(
532 "-p",
532 "-p",
533 "--port",
533 "--port",
534 type=int,
534 type=int,
535 help="port on which servers should listen"
535 help="port on which servers should listen"
536 " (default: $%s or %d)" % defaults['port'],
536 " (default: $%s or %d)" % defaults['port'],
537 )
537 )
538 harness.add_argument(
538 harness.add_argument(
539 '--profile-runner',
539 '--profile-runner',
540 action='store_true',
540 action='store_true',
541 help='run statprof on run-tests',
541 help='run statprof on run-tests',
542 )
542 )
543 harness.add_argument(
543 harness.add_argument(
544 "-R", "--restart", action="store_true", help="restart at last error"
544 "-R", "--restart", action="store_true", help="restart at last error"
545 )
545 )
546 harness.add_argument(
546 harness.add_argument(
547 "--runs-per-test",
547 "--runs-per-test",
548 type=int,
548 type=int,
549 dest="runs_per_test",
549 dest="runs_per_test",
550 help="run each test N times (default=1)",
550 help="run each test N times (default=1)",
551 default=1,
551 default=1,
552 )
552 )
553 harness.add_argument(
553 harness.add_argument(
554 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
554 "--shell", help="shell to use (default: $%s or %s)" % defaults['shell']
555 )
555 )
556 harness.add_argument(
556 harness.add_argument(
557 '--showchannels', action='store_true', help='show scheduling channels'
557 '--showchannels', action='store_true', help='show scheduling channels'
558 )
558 )
559 harness.add_argument(
559 harness.add_argument(
560 "--slowtimeout",
560 "--slowtimeout",
561 type=int,
561 type=int,
562 help="kill errant slow tests after SLOWTIMEOUT seconds"
562 help="kill errant slow tests after SLOWTIMEOUT seconds"
563 " (default: $%s or %d)" % defaults['slowtimeout'],
563 " (default: $%s or %d)" % defaults['slowtimeout'],
564 )
564 )
565 harness.add_argument(
565 harness.add_argument(
566 "-t",
566 "-t",
567 "--timeout",
567 "--timeout",
568 type=int,
568 type=int,
569 help="kill errant tests after TIMEOUT seconds"
569 help="kill errant tests after TIMEOUT seconds"
570 " (default: $%s or %d)" % defaults['timeout'],
570 " (default: $%s or %d)" % defaults['timeout'],
571 )
571 )
572 harness.add_argument(
572 harness.add_argument(
573 "--tmpdir",
573 "--tmpdir",
574 help="run tests in the given temporary directory"
574 help="run tests in the given temporary directory"
575 " (implies --keep-tmpdir)",
575 " (implies --keep-tmpdir)",
576 )
576 )
577 harness.add_argument(
577 harness.add_argument(
578 "-v", "--verbose", action="store_true", help="output verbose messages"
578 "-v", "--verbose", action="store_true", help="output verbose messages"
579 )
579 )
580
580
581 hgconf = parser.add_argument_group('Mercurial Configuration')
581 hgconf = parser.add_argument_group('Mercurial Configuration')
582 hgconf.add_argument(
582 hgconf.add_argument(
583 "--chg",
583 "--chg",
584 action="store_true",
584 action="store_true",
585 help="install and use chg wrapper in place of hg",
585 help="install and use chg wrapper in place of hg",
586 )
586 )
587 hgconf.add_argument(
587 hgconf.add_argument(
588 "--chg-debug",
588 "--chg-debug",
589 action="store_true",
589 action="store_true",
590 help="show chg debug logs",
590 help="show chg debug logs",
591 )
591 )
592 hgconf.add_argument(
592 hgconf.add_argument(
593 "--rhg",
593 "--rhg",
594 action="store_true",
594 action="store_true",
595 help="install and use rhg Rust implementation in place of hg",
595 help="install and use rhg Rust implementation in place of hg",
596 )
596 )
597 hgconf.add_argument(
597 hgconf.add_argument(
598 "--pyoxidized",
598 "--pyoxidized",
599 action="store_true",
599 action="store_true",
600 help="build the hg binary using pyoxidizer",
600 help="build the hg binary using pyoxidizer",
601 )
601 )
602 hgconf.add_argument("--compiler", help="compiler to build with")
602 hgconf.add_argument("--compiler", help="compiler to build with")
603 hgconf.add_argument(
603 hgconf.add_argument(
604 '--extra-config-opt',
604 '--extra-config-opt',
605 action="append",
605 action="append",
606 default=[],
606 default=[],
607 help='set the given config opt in the test hgrc',
607 help='set the given config opt in the test hgrc',
608 )
608 )
609 hgconf.add_argument(
609 hgconf.add_argument(
610 "-l",
610 "-l",
611 "--local",
611 "--local",
612 action="store_true",
612 action="store_true",
613 help="shortcut for --with-hg=<testdir>/../hg, "
613 help="shortcut for --with-hg=<testdir>/../hg, "
614 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
614 "--with-rhg=<testdir>/../rust/target/release/rhg if --rhg is set, "
615 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
615 "and --with-chg=<testdir>/../contrib/chg/chg if --chg is set",
616 )
616 )
617 hgconf.add_argument(
617 hgconf.add_argument(
618 "--ipv6",
618 "--ipv6",
619 action="store_true",
619 action="store_true",
620 help="prefer IPv6 to IPv4 for network related tests",
620 help="prefer IPv6 to IPv4 for network related tests",
621 )
621 )
622 hgconf.add_argument(
622 hgconf.add_argument(
623 "--pure",
623 "--pure",
624 action="store_true",
624 action="store_true",
625 help="use pure Python code instead of C extensions",
625 help="use pure Python code instead of C extensions",
626 )
626 )
627 hgconf.add_argument(
627 hgconf.add_argument(
628 "--rust",
628 "--rust",
629 action="store_true",
629 action="store_true",
630 help="use Rust code alongside C extensions",
630 help="use Rust code alongside C extensions",
631 )
631 )
632 hgconf.add_argument(
632 hgconf.add_argument(
633 "--no-rust",
633 "--no-rust",
634 action="store_true",
634 action="store_true",
635 help="do not use Rust code even if compiled",
635 help="do not use Rust code even if compiled",
636 )
636 )
637 hgconf.add_argument(
637 hgconf.add_argument(
638 "--with-chg",
638 "--with-chg",
639 metavar="CHG",
639 metavar="CHG",
640 help="use specified chg wrapper in place of hg",
640 help="use specified chg wrapper in place of hg",
641 )
641 )
642 hgconf.add_argument(
642 hgconf.add_argument(
643 "--with-rhg",
643 "--with-rhg",
644 metavar="RHG",
644 metavar="RHG",
645 help="use specified rhg Rust implementation in place of hg",
645 help="use specified rhg Rust implementation in place of hg",
646 )
646 )
647 hgconf.add_argument(
647 hgconf.add_argument(
648 "--with-hg",
648 "--with-hg",
649 metavar="HG",
649 metavar="HG",
650 help="test using specified hg script rather than a "
650 help="test using specified hg script rather than a "
651 "temporary installation",
651 "temporary installation",
652 )
652 )
653
653
654 reporting = parser.add_argument_group('Results Reporting')
654 reporting = parser.add_argument_group('Results Reporting')
655 reporting.add_argument(
655 reporting.add_argument(
656 "-C",
656 "-C",
657 "--annotate",
657 "--annotate",
658 action="store_true",
658 action="store_true",
659 help="output files annotated with coverage",
659 help="output files annotated with coverage",
660 )
660 )
661 reporting.add_argument(
661 reporting.add_argument(
662 "--color",
662 "--color",
663 choices=["always", "auto", "never"],
663 choices=["always", "auto", "never"],
664 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
664 default=os.environ.get('HGRUNTESTSCOLOR', 'auto'),
665 help="colorisation: always|auto|never (default: auto)",
665 help="colorisation: always|auto|never (default: auto)",
666 )
666 )
667 reporting.add_argument(
667 reporting.add_argument(
668 "-c",
668 "-c",
669 "--cover",
669 "--cover",
670 action="store_true",
670 action="store_true",
671 help="print a test coverage report",
671 help="print a test coverage report",
672 )
672 )
673 reporting.add_argument(
673 reporting.add_argument(
674 '--exceptions',
674 '--exceptions',
675 action='store_true',
675 action='store_true',
676 help='log all exceptions and generate an exception report',
676 help='log all exceptions and generate an exception report',
677 )
677 )
678 reporting.add_argument(
678 reporting.add_argument(
679 "-H",
679 "-H",
680 "--htmlcov",
680 "--htmlcov",
681 action="store_true",
681 action="store_true",
682 help="create an HTML report of the coverage of the files",
682 help="create an HTML report of the coverage of the files",
683 )
683 )
684 reporting.add_argument(
684 reporting.add_argument(
685 "--json",
685 "--json",
686 action="store_true",
686 action="store_true",
687 help="store test result data in 'report.json' file",
687 help="store test result data in 'report.json' file",
688 )
688 )
689 reporting.add_argument(
689 reporting.add_argument(
690 "--outputdir",
690 "--outputdir",
691 help="directory to write error logs to (default=test directory)",
691 help="directory to write error logs to (default=test directory)",
692 )
692 )
693 reporting.add_argument(
693 reporting.add_argument(
694 "-n", "--nodiff", action="store_true", help="skip showing test changes"
694 "-n", "--nodiff", action="store_true", help="skip showing test changes"
695 )
695 )
696 reporting.add_argument(
696 reporting.add_argument(
697 "-S",
697 "-S",
698 "--noskips",
698 "--noskips",
699 action="store_true",
699 action="store_true",
700 help="don't report skipped tests verbosely",
700 help="don't report skipped tests verbosely",
701 )
701 )
702 reporting.add_argument(
702 reporting.add_argument(
703 "--time", action="store_true", help="time how long each test takes"
703 "--time", action="store_true", help="time how long each test takes"
704 )
704 )
705 reporting.add_argument("--view", help="external diff viewer")
705 reporting.add_argument("--view", help="external diff viewer")
706 reporting.add_argument(
706 reporting.add_argument(
707 "--xunit", help="record xunit results at specified path"
707 "--xunit", help="record xunit results at specified path"
708 )
708 )
709
709
710 for option, (envvar, default) in defaults.items():
710 for option, (envvar, default) in defaults.items():
711 defaults[option] = type(default)(os.environ.get(envvar, default))
711 defaults[option] = type(default)(os.environ.get(envvar, default))
712 parser.set_defaults(**defaults)
712 parser.set_defaults(**defaults)
713
713
714 return parser
714 return parser
715
715
716
716
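# Illustrative sketch (not executed here): getparser() above resolves each
# default as an (ENVVAR, fallback) pair and coerces any environment override
# to the fallback's type, equivalent to
#
#     defaults = {
#         option: type(default)(os.environ.get(envvar, default))
#         for option, (envvar, default) in default_defaults.items()
#     }
#
# so with HGTEST_JOBS=8 exported, defaults['jobs'] becomes the integer 8.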
717 def parseargs(args, parser):
717 def parseargs(args, parser):
718 """Parse arguments with our OptionParser and validate results."""
718 """Parse arguments with our OptionParser and validate results."""
719 options = parser.parse_args(args)
719 options = parser.parse_args(args)
720
720
721 # jython is always pure
721 # jython is always pure
722 if 'java' in sys.platform or '__pypy__' in sys.modules:
722 if 'java' in sys.platform or '__pypy__' in sys.modules:
723 options.pure = True
723 options.pure = True
724
724
725 if platform.python_implementation() != 'CPython' and options.rust:
725 if platform.python_implementation() != 'CPython' and options.rust:
726 parser.error('Rust extensions are only available with CPython')
726 parser.error('Rust extensions are only available with CPython')
727
727
728 if options.pure and options.rust:
728 if options.pure and options.rust:
729 parser.error('--rust cannot be used with --pure')
729 parser.error('--rust cannot be used with --pure')
730
730
731 if options.rust and options.no_rust:
731 if options.rust and options.no_rust:
732 parser.error('--rust cannot be used with --no-rust')
732 parser.error('--rust cannot be used with --no-rust')
733
733
734 if options.local:
734 if options.local:
735 if options.with_hg or options.with_rhg or options.with_chg:
735 if options.with_hg or options.with_rhg or options.with_chg:
736 parser.error(
736 parser.error(
737 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
737 '--local cannot be used with --with-hg or --with-rhg or --with-chg'
738 )
738 )
739 if options.pyoxidized:
739 if options.pyoxidized:
740 parser.error('--pyoxidized does not work with --local (yet)')
740 parser.error('--pyoxidized does not work with --local (yet)')
741 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
741 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
742 reporootdir = os.path.dirname(testdir)
742 reporootdir = os.path.dirname(testdir)
743 pathandattrs = [(b'hg', 'with_hg')]
743 pathandattrs = [(b'hg', 'with_hg')]
744 if options.chg:
744 if options.chg:
745 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
745 pathandattrs.append((b'contrib/chg/chg', 'with_chg'))
746 if options.rhg:
746 if options.rhg:
747 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
747 pathandattrs.append((b'rust/target/release/rhg', 'with_rhg'))
748 for relpath, attr in pathandattrs:
748 for relpath, attr in pathandattrs:
749 binpath = os.path.join(reporootdir, relpath)
749 binpath = os.path.join(reporootdir, relpath)
750 if not (WINDOWS or os.access(binpath, os.X_OK)):
750 if not (WINDOWS or os.access(binpath, os.X_OK)):
751 parser.error(
751 parser.error(
752 '--local specified, but %r not found or '
752 '--local specified, but %r not found or '
753 'not executable' % binpath
753 'not executable' % binpath
754 )
754 )
755 setattr(options, attr, _bytes2sys(binpath))
755 setattr(options, attr, _bytes2sys(binpath))
756
756
757 if options.with_hg:
757 if options.with_hg:
758 options.with_hg = canonpath(_sys2bytes(options.with_hg))
758 options.with_hg = canonpath(_sys2bytes(options.with_hg))
759 if not (
759 if not (
760 os.path.isfile(options.with_hg)
760 os.path.isfile(options.with_hg)
761 and os.access(options.with_hg, os.X_OK)
761 and os.access(options.with_hg, os.X_OK)
762 ):
762 ):
763 parser.error('--with-hg must specify an executable hg script')
763 parser.error('--with-hg must specify an executable hg script')
764 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
764 if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
765 msg = 'warning: --with-hg should specify an hg script, not: %s\n'
765 msg = 'warning: --with-hg should specify an hg script, not: %s\n'
766 msg %= _bytes2sys(os.path.basename(options.with_hg))
766 msg %= _bytes2sys(os.path.basename(options.with_hg))
767 sys.stderr.write(msg)
767 sys.stderr.write(msg)
768 sys.stderr.flush()
768 sys.stderr.flush()
769
769
770 if (options.chg or options.with_chg) and WINDOWS:
770 if (options.chg or options.with_chg) and WINDOWS:
771 parser.error('chg does not work on %s' % os.name)
771 parser.error('chg does not work on %s' % os.name)
772 if (options.rhg or options.with_rhg) and WINDOWS:
772 if (options.rhg or options.with_rhg) and WINDOWS:
773 parser.error('rhg does not work on %s' % os.name)
773 parser.error('rhg does not work on %s' % os.name)
774 if options.pyoxidized and not WINDOWS:
774 if options.pyoxidized and not WINDOWS:
775 parser.error('--pyoxidized is currently Windows only')
775 parser.error('--pyoxidized is currently Windows only')
776 if options.with_chg:
776 if options.with_chg:
777 options.chg = False # no installation to temporary location
777 options.chg = False # no installation to temporary location
778 options.with_chg = canonpath(_sys2bytes(options.with_chg))
778 options.with_chg = canonpath(_sys2bytes(options.with_chg))
779 if not (
779 if not (
780 os.path.isfile(options.with_chg)
780 os.path.isfile(options.with_chg)
781 and os.access(options.with_chg, os.X_OK)
781 and os.access(options.with_chg, os.X_OK)
782 ):
782 ):
783 parser.error('--with-chg must specify a chg executable')
783 parser.error('--with-chg must specify a chg executable')
784 if options.with_rhg:
784 if options.with_rhg:
785 options.rhg = False # no installation to temporary location
785 options.rhg = False # no installation to temporary location
786 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
786 options.with_rhg = canonpath(_sys2bytes(options.with_rhg))
787 if not (
787 if not (
788 os.path.isfile(options.with_rhg)
788 os.path.isfile(options.with_rhg)
789 and os.access(options.with_rhg, os.X_OK)
789 and os.access(options.with_rhg, os.X_OK)
790 ):
790 ):
791 parser.error('--with-rhg must specify a rhg executable')
791 parser.error('--with-rhg must specify a rhg executable')
792 if options.chg and options.with_hg:
792 if options.chg and options.with_hg:
793 # chg shares installation location with hg
793 # chg shares installation location with hg
794 parser.error(
794 parser.error(
795 '--chg does not work when --with-hg is specified '
795 '--chg does not work when --with-hg is specified '
796 '(use --with-chg instead)'
796 '(use --with-chg instead)'
797 )
797 )
798 if options.rhg and options.with_hg:
798 if options.rhg and options.with_hg:
799 # rhg shares installation location with hg
799 # rhg shares installation location with hg
800 parser.error(
800 parser.error(
801 '--rhg does not work when --with-hg is specified '
801 '--rhg does not work when --with-hg is specified '
802 '(use --with-rhg instead)'
802 '(use --with-rhg instead)'
803 )
803 )
804 if options.rhg and options.chg:
804 if options.rhg and options.chg:
805 parser.error('--rhg and --chg do not work together')
805 parser.error('--rhg and --chg do not work together')
806
806
807 if options.color == 'always' and not pygmentspresent:
807 if options.color == 'always' and not pygmentspresent:
808 sys.stderr.write(
808 sys.stderr.write(
            'warning: --color=always ignored because '
            'pygments is not installed\n'
        )

    if options.bisect_repo and not options.known_good_rev:
        parser.error("--bisect-repo cannot be used without --known-good-rev")

    global useipv6
    if options.ipv6:
        useipv6 = checksocketfamily('AF_INET6')
    else:
        # only use IPv6 if IPv4 is unavailable and IPv6 is available
        useipv6 = (not checksocketfamily('AF_INET')) and checksocketfamily(
            'AF_INET6'
        )

    options.anycoverage = options.cover or options.annotate or options.htmlcov
    if options.anycoverage:
        try:
            import coverage

            covver = version.StrictVersion(coverage.__version__).version
            if covver < (3, 3):
                parser.error('coverage options require coverage 3.3 or later')
        except ImportError:
            parser.error('coverage options now require the coverage package')

    if options.anycoverage and options.local:
        # this needs some path mangling somewhere, I guess
        parser.error(
            "sorry, coverage options do not work when --local " "is specified"
        )

    if options.anycoverage and options.with_hg:
        parser.error(
            "sorry, coverage options do not work when --with-hg " "is specified"
        )

    global verbose
    if options.verbose:
        verbose = ''

    if options.tmpdir:
        options.tmpdir = canonpath(options.tmpdir)

    if options.jobs < 1:
        parser.error('--jobs must be positive')
    if options.interactive and options.debug:
        parser.error("-i/--interactive and -d/--debug are incompatible")
    if options.debug:
        if options.timeout != defaults['timeout']:
            sys.stderr.write('warning: --timeout option ignored with --debug\n')
        if options.slowtimeout != defaults['slowtimeout']:
            sys.stderr.write(
                'warning: --slowtimeout option ignored with --debug\n'
            )
        options.timeout = 0
        options.slowtimeout = 0

    if options.blacklist:
        options.blacklist = parselistfiles(options.blacklist, 'blacklist')
    if options.whitelist:
        options.whitelisted = parselistfiles(options.whitelist, 'whitelist')
    else:
        options.whitelisted = {}

    if options.showchannels:
        options.nodiff = True

    return options


def rename(src, dst):
    """Like os.rename(), trade atomicity and opened files friendliness
    for existing destination support.
    """
    shutil.copy(src, dst)
    os.remove(src)
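    # Note: because this copies and then removes, an existing dst is simply
    # overwritten (at the cost of atomicity), unlike a plain os.rename() on
    # some platforms.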


def makecleanable(path):
    """Try to fix directory permission recursively so that the entire tree
    can be deleted"""
    for dirpath, dirnames, _filenames in os.walk(path, topdown=True):
        for d in dirnames:
            p = os.path.join(dirpath, d)
            try:
                os.chmod(p, os.stat(p).st_mode & 0o777 | 0o700)  # chmod u+rwx
            except OSError:
                pass


_unified_diff = difflib.unified_diff
if PYTHON3:
    import functools

    _unified_diff = functools.partial(difflib.diff_bytes, difflib.unified_diff)


def getdiff(expected, output, ref, err):
    servefail = False
    lines = []
    for line in _unified_diff(expected, output, ref, err):
        if line.startswith(b'+++') or line.startswith(b'---'):
            line = line.replace(b'\\', b'/')
            if line.endswith(b' \n'):
                line = line[:-2] + b'\n'
        lines.append(line)
        if not servefail and line.startswith(
            b'+ abort: child process failed to start'
        ):
            servefail = True

    return servefail, lines


verbose = False


def vlog(*msg):
    """Log only when in verbose mode."""
    if verbose is False:
        return

    return log(*msg)


# Bytes that break XML even in a CDATA block: control characters 0-31
# sans \t, \n and \r
CDATA_EVIL = re.compile(br"[\000-\010\013\014\016-\037]")

# Match feature conditionalized output lines in the form, capturing the feature
# list in group 2, and the preceding line output in group 1:
#
#   output..output (feature !)\n
optline = re.compile(br'(.*) \((.+?) !\)\n$')
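# e.g. b'  some output (windows !)\n' matches with group(1) == b'  some output'
# and group(2) == b'windows'.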


def cdatasafe(data):
    """Make a string safe to include in a CDATA block.

    Certain control characters are illegal in a CDATA block, and
    there's no way to include a ]]> in a CDATA either. This function
    replaces illegal bytes with ? and adds a space between the ]] so
    that it won't break the CDATA block.
    """
    return CDATA_EVIL.sub(b'?', data).replace(b']]>', b'] ]>')
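    # e.g. cdatasafe(b'\x07oops ]]> end') -> b'?oops ] ]> end', which keeps the
    # surrounding CDATA section well-formed.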


def log(*msg):
    """Log something to stdout.

    Arguments are strings to print.
    """
    with iolock:
        if verbose:
            print(verbose, end=' ')
        for m in msg:
            print(m, end=' ')
        print()
        sys.stdout.flush()


def highlightdiff(line, color):
    if not color:
        return line
    assert pygmentspresent
    return pygments.highlight(
        line.decode('latin1'), difflexer, terminal256formatter
    ).encode('latin1')


def highlightmsg(msg, color):
    if not color:
        return msg
    assert pygmentspresent
    return pygments.highlight(msg, runnerlexer, runnerformatter)


def terminate(proc):
    """Terminate subprocess"""
    vlog('# Terminating process %d' % proc.pid)
    try:
        proc.terminate()
    except OSError:
        pass


def killdaemons(pidfile):
    import killdaemons as killmod

    return killmod.killdaemons(pidfile, tryhard=False, remove=True, logfn=vlog)


class Test(unittest.TestCase):
    """Encapsulates a single, runnable test.

    While this class conforms to the unittest.TestCase API, it differs in that
    instances need to be instantiated manually. (Typically, unittest.TestCase
    classes are instantiated automatically by scanning modules.)
    """

    # Status code reserved for skipped tests (used by hghave).
    SKIPPED_STATUS = 80

    def __init__(
        self,
        path,
        outputdir,
        tmpdir,
        keeptmpdir=False,
        debug=False,
        first=False,
        timeout=None,
        startport=None,
        extraconfigopts=None,
        shell=None,
        hgcommand=None,
        slowtimeout=None,
        usechg=False,
        chgdebug=False,
        useipv6=False,
    ):
        """Create a test from parameters.

        path is the full path to the file defining the test.

        tmpdir is the main temporary directory to use for this test.

        keeptmpdir determines whether to keep the test's temporary directory
        after execution. It defaults to removal (False).

        debug mode will make the test execute verbosely, with unfiltered
        output.

        timeout controls the maximum run time of the test. It is ignored when
        debug is True. See slowtimeout for tests with #require slow.

        slowtimeout overrides timeout if the test has #require slow.

        startport controls the starting port number to use for this test. Each
        test will reserve 3 port numbers for execution. It is the caller's
        responsibility to allocate a non-overlapping port range to Test
        instances.

        extraconfigopts is an iterable of extra hgrc config options. Values
        must have the form "key=value" (something understood by hgrc). Values
        of the form "foo.key=value" will result in "[foo] key=value".

        shell is the shell to execute tests in.
        """
        if timeout is None:
            timeout = defaults['timeout']
        if startport is None:
            startport = defaults['port']
        if slowtimeout is None:
            slowtimeout = defaults['slowtimeout']
        self.path = path
        self.relpath = os.path.relpath(path)
        self.bname = os.path.basename(path)
        self.name = _bytes2sys(self.bname)
        self._testdir = os.path.dirname(path)
        self._outputdir = outputdir
        self._tmpname = os.path.basename(path)
        self.errpath = os.path.join(self._outputdir, b'%s.err' % self.bname)

        self._threadtmp = tmpdir
        self._keeptmpdir = keeptmpdir
        self._debug = debug
        self._first = first
        self._timeout = timeout
        self._slowtimeout = slowtimeout
        self._startport = startport
        self._extraconfigopts = extraconfigopts or []
        self._shell = _sys2bytes(shell)
        self._hgcommand = hgcommand or b'hg'
        self._usechg = usechg
        self._chgdebug = chgdebug
        self._useipv6 = useipv6

        self._aborted = False
        self._daemonpids = []
        self._finished = None
        self._ret = None
        self._out = None
        self._skipped = None
        self._testtmp = None
        self._chgsockdir = None

        self._refout = self.readrefout()

    def readrefout(self):
        """read reference output"""
        # If we're not in --debug mode and reference output file exists,
        # check test output against it.
        if self._debug:
            return None  # to match "out is None"
        elif os.path.exists(self.refpath):
            with open(self.refpath, 'rb') as f:
                return f.read().splitlines(True)
        else:
            return []

    # needed to get base class __repr__ running
    @property
    def _testMethodName(self):
        return self.name

    def __str__(self):
        return self.name

    def shortDescription(self):
        return self.name

    def setUp(self):
        """Tasks to perform before run()."""
        self._finished = False
        self._ret = None
        self._out = None
        self._skipped = None

        try:
            os.mkdir(self._threadtmp)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        name = self._tmpname
        self._testtmp = os.path.join(self._threadtmp, name)
        os.mkdir(self._testtmp)

        # Remove any previous output files.
        if os.path.exists(self.errpath):
            try:
                os.remove(self.errpath)
            except OSError as e:
                # We might have raced another test to clean up a .err
                # file, so ignore ENOENT when removing a previous .err
                # file.
                if e.errno != errno.ENOENT:
                    raise

        if self._usechg:
            self._chgsockdir = os.path.join(
                self._threadtmp, b'%s.chgsock' % name
            )
            os.mkdir(self._chgsockdir)

    def run(self, result):
        """Run this test and report results against a TestResult instance."""
        # This function is extremely similar to unittest.TestCase.run(). Once
        # we require Python 2.7 (or at least its version of unittest), this
        # function can largely go away.
        self._result = result
        result.startTest(self)
        try:
            try:
                self.setUp()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                return

            success = False
            try:
                self.runTest()
            except KeyboardInterrupt:
                self._aborted = True
                raise
            except unittest.SkipTest as e:
                result.addSkip(self, str(e))
                # The base class will have already counted this as a
                # test we "ran", but we want to exclude skipped tests
                # from those we count towards those run.
                result.testsRun -= 1
            except self.failureException as e:
                # This differs from unittest in that we don't capture
                # the stack trace. This is for historical reasons and
                # this decision could be revisited in the future,
                # especially for PythonTest instances.
                if result.addFailure(self, str(e)):
                    success = True
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                success = True

            try:
                self.tearDown()
            except (KeyboardInterrupt, SystemExit):
                self._aborted = True
                raise
            except Exception:
                result.addError(self, sys.exc_info())
                success = False

            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self, interrupted=self._aborted)

    def runTest(self):
        """Run this test instance.

        This will return a tuple describing the result of the test.
        """
        env = self._getenv()
        self._genrestoreenv(env)
        self._daemonpids.append(env['DAEMON_PIDS'])
        self._createhgrc(env['HGRCPATH'])

        vlog('# Test', self.name)

        ret, out = self._run(env)
        self._finished = True
        self._ret = ret
        self._out = out

        def describe(ret):
            if ret < 0:
                return 'killed by signal: %d' % -ret
            return 'returned error code %d' % ret

        self._skipped = False

        if ret == self.SKIPPED_STATUS:
            if out is None:  # Debug mode, nothing to parse.
                missing = ['unknown']
                failed = None
            else:
                missing, failed = TTest.parsehghaveoutput(out)

            if not missing:
                missing = ['skipped']

            if failed:
                self.fail('hg have failed checking for %s' % failed[-1])
            else:
                self._skipped = True
                raise unittest.SkipTest(missing[-1])
        elif ret == 'timeout':
            self.fail('timed out')
        elif ret is False:
            self.fail('no result code from test')
        elif out != self._refout:
            # Diff generation may rely on written .err file.
            if (
                (ret != 0 or out != self._refout)
                and not self._skipped
                and not self._debug
            ):
                with open(self.errpath, 'wb') as f:
                    for line in out:
                        f.write(line)

            # The result object handles diff calculation for us.
            with firstlock:
                if self._result.addOutputMismatch(self, ret, out, self._refout):
                    # change was accepted, skip failing
                    return
                if self._first:
                    global firsterror
                    firsterror = True

            if ret:
                msg = 'output changed and ' + describe(ret)
            else:
                msg = 'output changed'

            self.fail(msg)
        elif ret:
            self.fail(describe(ret))

    def tearDown(self):
        """Tasks to perform after run()."""
        for entry in self._daemonpids:
            killdaemons(entry)
        self._daemonpids = []

        if self._keeptmpdir:
            log(
                '\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s'
                % (
                    _bytes2sys(self._testtmp),
                    _bytes2sys(self._threadtmp),
                )
            )
        else:
            try:
                shutil.rmtree(self._testtmp)
            except OSError:
                # unreadable directory may be left in $TESTTMP; fix permission
                # and try again
                makecleanable(self._testtmp)
                shutil.rmtree(self._testtmp, True)
            shutil.rmtree(self._threadtmp, True)

        if self._usechg:
            # chgservers will stop automatically after they find the socket
            # files are deleted
            shutil.rmtree(self._chgsockdir, True)

        if (
            (self._ret != 0 or self._out != self._refout)
            and not self._skipped
            and not self._debug
            and self._out
        ):
            with open(self.errpath, 'wb') as f:
                for line in self._out:
                    f.write(line)

        vlog("# Ret was:", self._ret, '(%s)' % self.name)

    def _run(self, env):
        # This should be implemented in child classes to run tests.
        raise unittest.SkipTest('unknown test type')

    def abort(self):
        """Terminate execution of this test."""
        self._aborted = True

    def _portmap(self, i):
        offset = b'' if i == 0 else b'%d' % i
        return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset)
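        # e.g. with a start port of 20000, _portmap(1) yields
        # (br':20001\b', b':$HGPORT1'), mapping real port numbers back to the
        # placeholders used in .t output.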

    def _getreplacements(self):
        """Obtain a mapping of text replacements to apply to test output.

        Test output needs to be normalized so it can be compared to expected
        output. This function defines how some of that normalization will
        occur.
        """
        r = [
            # This list should be parallel to defineport in _getenv
            self._portmap(0),
            self._portmap(1),
            self._portmap(2),
            (br'([^0-9])%s' % re.escape(self._localip()), br'\1$LOCALIP'),
            (br'\bHG_TXNID=TXN:[a-f0-9]{40}\b', br'HG_TXNID=TXN:$ID$'),
        ]
        r.append((self._escapepath(self._testtmp), b'$TESTTMP'))
        if WINDOWS:
            # JSON output escapes backslashes in Windows paths, so also catch a
            # double-escape.
            replaced = self._testtmp.replace(b'\\', br'\\')
            r.append((self._escapepath(replaced), b'$STR_REPR_TESTTMP'))

        replacementfile = os.path.join(self._testdir, b'common-pattern.py')

        if os.path.exists(replacementfile):
            data = {}
            with open(replacementfile, mode='rb') as source:
                # the intermediate 'compile' step helps with debugging
                code = compile(source.read(), replacementfile, 'exec')
                exec(code, data)
            for value in data.get('substitutions', ()):
                if len(value) != 2:
                    msg = 'malformatted substitution in %s: %r'
                    msg %= (replacementfile, value)
                    raise ValueError(msg)
                r.append(value)
        return r

    def _escapepath(self, p):
        if WINDOWS:
            return b''.join(
                c.isalpha()
                and b'[%s%s]' % (c.lower(), c.upper())
                or c in b'/\\'
                and br'[/\\]'
                or c.isdigit()
                and c
                or b'\\' + c
                for c in [p[i : i + 1] for i in range(len(p))]
            )
        else:
            return re.escape(p)
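        # e.g. on Windows, b'C:\\Temp' becomes br'[cC]\:[/\\][tT][eE][mM][pP]',
        # so matching against $TESTTMP is case- and path-separator-insensitive.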

    def _localip(self):
        if self._useipv6:
            return b'::1'
        else:
            return b'127.0.0.1'

    def _genrestoreenv(self, testenv):
        """Generate a script that can be used by tests to restore the original
        environment."""
        # Put the restoreenv script inside self._threadtmp
        scriptpath = os.path.join(self._threadtmp, b'restoreenv.sh')
        testenv['HGTEST_RESTOREENV'] = _bytes2sys(scriptpath)

        # Only restore environment variable names that the shell allows
        # us to export.
        name_regex = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$')

        # Do not restore these variables; otherwise tests would fail.
        reqnames = {'PYTHON', 'TESTDIR', 'TESTTMP'}

        with open(scriptpath, 'w') as envf:
            for name, value in origenviron.items():
                if not name_regex.match(name):
                    # Skip environment variables with unusual names not
                    # allowed by most shells.
                    continue
                if name in reqnames:
                    continue
                envf.write('%s=%s\n' % (name, shellquote(value)))

            for name in testenv:
                if name in origenviron or name in reqnames:
                    continue
                envf.write('unset %s\n' % (name,))

    def _getenv(self):
        """Obtain environment variables to use during test execution."""

        def defineport(i):
            offset = '' if i == 0 else '%s' % i
            env["HGPORT%s" % offset] = '%s' % (self._startport + i)

        env = os.environ.copy()
        env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
        env['HGEMITWARNINGS'] = '1'
        env['TESTTMP'] = _bytes2sys(self._testtmp)
        uid_file = os.path.join(_bytes2sys(self._testtmp), 'UID')
        env['HGTEST_UUIDFILE'] = uid_file
        env['TESTNAME'] = self.name
        env['HOME'] = _bytes2sys(self._testtmp)
        if WINDOWS:
            env['REALUSERPROFILE'] = env['USERPROFILE']
            # py3.8+ ignores HOME: https://bugs.python.org/issue36264
            env['USERPROFILE'] = env['HOME']
        formated_timeout = _bytes2sys(b"%d" % default_defaults['timeout'][1])
        env['HGTEST_TIMEOUT_DEFAULT'] = formated_timeout
        env['HGTEST_TIMEOUT'] = _bytes2sys(b"%d" % self._timeout)
        # This number should match portneeded in _getport
        for port in xrange(3):
            # This list should be parallel to _portmap in _getreplacements
            defineport(port)
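            # i.e. HGPORT, HGPORT1 and HGPORT2 end up defined for every test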
        env["HGRCPATH"] = _bytes2sys(os.path.join(self._threadtmp, b'.hgrc'))
        env["DAEMON_PIDS"] = _bytes2sys(
            os.path.join(self._threadtmp, b'daemon.pids')
        )
        env["HGEDITOR"] = (
            '"' + sysexecutable + '"' + ' -c "import sys; sys.exit(0)"'
        )
        env["HGUSER"] = "test"
        env["HGENCODING"] = "ascii"
        env["HGENCODINGMODE"] = "strict"
        env["HGHOSTNAME"] = "test-hostname"
        env['HGIPV6'] = str(int(self._useipv6))
        # See contrib/catapipe.py for how to use this functionality.
        if 'HGTESTCATAPULTSERVERPIPE' not in env:
            # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
            # non-test one in as a default, otherwise set to devnull
            env['HGTESTCATAPULTSERVERPIPE'] = env.get(
                'HGCATAPULTSERVERPIPE', os.devnull
            )

        extraextensions = []
        for opt in self._extraconfigopts:
            section, key = opt.split('.', 1)
            if section != 'extensions':
                continue
            name = key.split('=', 1)[0]
            extraextensions.append(name)

        if extraextensions:
            env['HGTESTEXTRAEXTENSIONS'] = ' '.join(extraextensions)

        # LOCALIP could be ::1 or 127.0.0.1. Useful for tests that require raw
        # IP addresses.
        env['LOCALIP'] = _bytes2sys(self._localip())

        # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
        # but this is needed for testing python instances like dummyssh,
        # dummysmtpd.py, and dumbhttp.py.
        if PYTHON3 and WINDOWS:
            env['PYTHONLEGACYWINDOWSSTDIO'] = '1'

        # Modified HOME in test environment can confuse Rust tools. So set
        # CARGO_HOME and RUSTUP_HOME automatically if a Rust toolchain is
        # present and these variables aren't already defined.
        cargo_home_path = os.path.expanduser('~/.cargo')
        rustup_home_path = os.path.expanduser('~/.rustup')

        if os.path.exists(cargo_home_path) and b'CARGO_HOME' not in osenvironb:
            env['CARGO_HOME'] = cargo_home_path
        if (
            os.path.exists(rustup_home_path)
            and b'RUSTUP_HOME' not in osenvironb
        ):
            env['RUSTUP_HOME'] = rustup_home_path

        # Reset some environment variables to well-known values so that
        # the tests produce repeatable output.
        env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
        env['TZ'] = 'GMT'
        env["EMAIL"] = "Foo Bar <foo.bar@example.com>"
        env['COLUMNS'] = '80'
        env['TERM'] = 'xterm'

        dropped = [
            'CDPATH',
            'CHGDEBUG',
            'EDITOR',
            'GREP_OPTIONS',
            'HG',
            'HGMERGE',
            'HGPLAIN',
            'HGPLAINEXCEPT',
            'HGPROF',
            'http_proxy',
            'no_proxy',
            'NO_PROXY',
            'PAGER',
            'VISUAL',
        ]

        for k in dropped:
            if k in env:
                del env[k]

        # unset env related to hooks
        for k in list(env):
            if k.startswith('HG_'):
                del env[k]

        if self._usechg:
            env['CHGSOCKNAME'] = os.path.join(self._chgsockdir, b'server')
        if self._chgdebug:
            env['CHGDEBUG'] = 'true'

        return env

    def _createhgrc(self, path):
        """Create an hgrc file for this test."""
        with open(path, 'wb') as hgrc:
            hgrc.write(b'[ui]\n')
            hgrc.write(b'slash = True\n')
            hgrc.write(b'interactive = False\n')
            hgrc.write(b'detailed-exit-code = True\n')
            hgrc.write(b'merge = internal:merge\n')
            hgrc.write(b'mergemarkers = detailed\n')
            hgrc.write(b'promptecho = True\n')
            dummyssh = os.path.join(self._testdir, b'dummyssh')
            hgrc.write(b'ssh = "%s" "%s"\n' % (PYTHON, dummyssh))
            hgrc.write(b'timeout.warn=15\n')
            hgrc.write(b'[chgserver]\n')
            hgrc.write(b'idletimeout=60\n')
            hgrc.write(b'[defaults]\n')
            hgrc.write(b'[devel]\n')
            hgrc.write(b'all-warnings = true\n')
            hgrc.write(b'default-date = 0 0\n')
            hgrc.write(b'[largefiles]\n')
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/largefiles'))
            )
            hgrc.write(b'[lfs]\n')
            hgrc.write(
                b'usercache = %s\n'
                % (os.path.join(self._testtmp, b'.cache/lfs'))
            )
            hgrc.write(b'[web]\n')
            hgrc.write(b'address = localhost\n')
            hgrc.write(b'ipv6 = %r\n' % self._useipv6)
            hgrc.write(b'server-header = testing stub value\n')

            for opt in self._extraconfigopts:
                section, key = _sys2bytes(opt).split(b'.', 1)
                assert b'=' in key, (
                    'extra config opt %s must ' 'have an = for assignment' % opt
                )
                hgrc.write(b'[%s]\n%s\n' % (section, key))
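                # e.g. an extra config opt of 'web.push_ssl=False' is written
                # here as b'[web]\npush_ssl=False\n' in the generated hgrc.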

    def fail(self, msg):
        # unittest differentiates between errored and failed.
        # Failed is denoted by AssertionError (by default at least).
        raise AssertionError(msg)

    def _runcommand(self, cmd, env, normalizenewlines=False):
        """Run command in a sub-process, capturing the output (stdout and
        stderr).

        Return a tuple (exitcode, output). output is None in debug mode.
        """
        if self._debug:
            proc = subprocess.Popen(
                _bytes2sys(cmd),
                shell=True,
                close_fds=closefds,
                cwd=_bytes2sys(self._testtmp),
                env=env,
            )
            ret = proc.wait()
            return (ret, None)

        proc = Popen4(cmd, self._testtmp, self._timeout, env)

        def cleanup():
            terminate(proc)
            ret = proc.wait()
            if ret == 0:
                ret = signal.SIGTERM << 8
            killdaemons(env['DAEMON_PIDS'])
            return ret

        proc.tochild.close()

        try:
            output = proc.fromchild.read()
        except KeyboardInterrupt:
            vlog('# Handling keyboard interrupt')
            cleanup()
            raise

        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)

        if proc.timeout:
            ret = 'timeout'

        if ret:
            killdaemons(env['DAEMON_PIDS'])

        for s, r in self._getreplacements():
            output = re.sub(s, r, output)

        if normalizenewlines:
            output = output.replace(b'\r\n', b'\n')

        return ret, output.splitlines(True)


class PythonTest(Test):
    """A Python-based test."""

    @property
    def refpath(self):
        return os.path.join(self._testdir, b'%s.out' % self.bname)

    def _run(self, env):
        # Quote the python(3) executable for Windows
        cmd = b'"%s" "%s"' % (PYTHON, self.path)
        vlog("# Running", cmd.decode("utf-8"))
        result = self._runcommand(cmd, env, normalizenewlines=WINDOWS)
        if self._aborted:
            raise KeyboardInterrupt()

        return result


# Some glob patterns apply only in some circumstances, so the script
# might want to remove (glob) annotations that otherwise should be
# retained.
checkcodeglobpats = [
    # On Windows it looks like \ doesn't require a (glob), but we know
    # better.
    re.compile(br'^pushing to \$TESTTMP/.*[^)]$'),
    re.compile(br'^moving \S+/.*[^)]$'),
    re.compile(br'^pulling from \$TESTTMP/.*[^)]$'),
    # Not all platforms have 127.0.0.1 as loopback (though most do),
    # so we always glob that too.
    re.compile(br'.*\$LOCALIP.*$'),
]

bchr = chr
if PYTHON3:
    bchr = lambda x: bytes([x])

WARN_UNDEFINED = 1
WARN_YES = 2
WARN_NO = 3

MARK_OPTIONAL = b" (?)\n"


def isoptional(line):
    return line.endswith(MARK_OPTIONAL)


class TTest(Test):
    """A "t test" is a test backed by a .t file."""

    SKIPPED_PREFIX = b'skipped: '
    FAILED_PREFIX = b'hghave check failed: '
    NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search

    ESCAPESUB = re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub
    ESCAPEMAP = {bchr(i): br'\x%02x' % i for i in range(256)}
    ESCAPEMAP.update({b'\\': b'\\\\', b'\r': br'\r'})

    def __init__(self, path, *args, **kwds):
        # accept an extra "case" parameter
        case = kwds.pop('case', [])
        self._case = case
        self._allcases = {x for y in parsettestcases(path) for x in y}
        super(TTest, self).__init__(path, *args, **kwds)
        if case:
            casepath = b'#'.join(case)
            self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
            self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
            self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
        self._have = {}

    @property
    def refpath(self):
        return os.path.join(self._testdir, self.bname)

    def _run(self, env):
        with open(self.path, 'rb') as f:
            lines = f.readlines()

        # .t file is both reference output and the test input, keep reference
        # output updated with the test input. This avoids some race
        # conditions where the reference output does not match the actual test.
        if self._refout is not None:
            self._refout = lines

        salt, script, after, expected = self._parsetest(lines)

        # Write out the generated script.
        fname = b'%s.sh' % self._testtmp
        with open(fname, 'wb') as f:
            for l in script:
                f.write(l)

        cmd = b'%s "%s"' % (self._shell, fname)
        vlog("# Running", cmd.decode("utf-8"))

        exitcode, output = self._runcommand(cmd, env)

        if self._aborted:
            raise KeyboardInterrupt()

        # Do not merge output if skipped. Return hghave message instead.
        # Similarly, with --debug, output is None.
        if exitcode == self.SKIPPED_STATUS or output is None:
            return exitcode, output

        return self._processoutput(exitcode, output, salt, after, expected)

    def _hghave(self, reqs):
        allreqs = b' '.join(reqs)

        self._detectslow(reqs)

        if allreqs in self._have:
            return self._have.get(allreqs)

        # TODO do something smarter when all other uses of hghave are gone.
        runtestdir = osenvironb[b'RUNTESTDIR']
        tdir = runtestdir.replace(b'\\', b'/')
        proc = Popen4(
            b'%s -c "%s/hghave %s"' % (self._shell, tdir, allreqs),
            self._testtmp,
            0,
            self._getenv(),
        )
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if wifexited(ret):
            ret = os.WEXITSTATUS(ret)
        if ret == 2:
            print(stdout.decode('utf-8'))
            sys.exit(1)

        if ret != 0:
            self._have[allreqs] = (False, stdout)
            return False, stdout

        self._have[allreqs] = (True, None)
        return True, None

    def _detectslow(self, reqs):
        """update the timeout of slow test when appropriate"""
        if b'slow' in reqs:
            self._timeout = self._slowtimeout

    def _iftest(self, args):
        # implements "#if"
        reqs = []
        for arg in args:
            if arg.startswith(b'no-') and arg[3:] in self._allcases:
                if arg[3:] in self._case:
                    return False
            elif arg in self._allcases:
                if arg not in self._case:
                    return False
            else:
                reqs.append(arg)
        self._detectslow(reqs)
        return self._hghave(reqs)[0]

    def _parsetest(self, lines):
        # We generate a shell script which outputs unique markers to line
        # up script results with our source. These markers include input
        # line number and the last return code.
        salt = b"SALT%d" % time.time()

        def addsalt(line, inpython):
            if inpython:
                script.append(b'%s %d 0\n' % (salt, line))
            else:
                script.append(b'echo %s %d $?\n' % (salt, line))
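                # e.g. for test line 12, the shell branch emits
                # "echo SALT<timestamp> 12 $?" so the output can later be
                # re-aligned with the source by line number.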
1819
1819
1820 activetrace = []
1820 activetrace = []
1821 session = str(uuid.uuid4())
1821 session = str(uuid.uuid4())
1822 if PYTHON3:
1822 if PYTHON3:
1823 session = session.encode('ascii')
1823 session = session.encode('ascii')
1824 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1824 hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or os.getenv(
1825 'HGCATAPULTSERVERPIPE'
1825 'HGCATAPULTSERVERPIPE'
1826 )
1826 )
1827
1827
1828 def toggletrace(cmd=None):
1828 def toggletrace(cmd=None):
1829 if not hgcatapult or hgcatapult == os.devnull:
1829 if not hgcatapult or hgcatapult == os.devnull:
1830 return
1830 return
1831
1831
1832 if activetrace:
1832 if activetrace:
1833 script.append(
1833 script.append(
1834 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1834 b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1835 % (session, activetrace[0])
1835 % (session, activetrace[0])
1836 )
1836 )
1837 if cmd is None:
1837 if cmd is None:
1838 return
1838 return
1839
1839
1840 if isinstance(cmd, str):
1840 if isinstance(cmd, str):
1841 quoted = shellquote(cmd.strip())
1841 quoted = shellquote(cmd.strip())
1842 else:
1842 else:
1843 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1843 quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
1844 quoted = quoted.replace(b'\\', b'\\\\')
1844 quoted = quoted.replace(b'\\', b'\\\\')
1845 script.append(
1845 script.append(
1846 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1846 b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n'
1847 % (session, quoted)
1847 % (session, quoted)
1848 )
1848 )
1849 activetrace[0:] = [quoted]
1849 activetrace[0:] = [quoted]
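# Net effect: the generated script echoes 'START <session> <command>'
# into the catapult pipe just before each traced command and echoes the
# matching 'END' when the next command begins (or when the test ends),
# so an external trace server can reconstruct per-command time spans.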
1850
1850
1851 script = []
1851 script = []
1852
1852
1853 # After we run the shell script, we re-unify the script output
1853 # After we run the shell script, we re-unify the script output
1854 # with non-active parts of the source, with synchronization by our
1854 # with non-active parts of the source, with synchronization by our
1855 # SALT line number markers. The after table contains the non-active
1855 # SALT line number markers. The after table contains the non-active
1856 # components, ordered by line number.
1856 # components, ordered by line number.
1857 after = {}
1857 after = {}
1858
1858
1859 # Expected shell script output.
1859 # Expected shell script output.
1860 expected = {}
1860 expected = {}
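# For instance, if input line 12 is '  $ hg status', expected[12]
# collects the '  '-indented output lines that follow it (with the
# two-space prefix stripped) for _process_out_line() to match, while
# after[12] collects the verbatim non-active source lines up to and
# including the next command, which _process_cmd_line() merges back
# into the unified output using the salt markers.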
1861
1861
1862 pos = prepos = -1
1862 pos = prepos = -1
1863
1863
1864 # True or False when in a true or false conditional section
1864 # True or False when in a true or false conditional section
1865 skipping = None
1865 skipping = None
1866
1866
1867 # We keep track of whether or not we're in a Python block so we
1867 # We keep track of whether or not we're in a Python block so we
1868 # can generate the surrounding doctest magic.
1868 # can generate the surrounding doctest magic.
1869 inpython = False
1869 inpython = False
1870
1870
1871 if self._debug:
1871 if self._debug:
1872 script.append(b'set -x\n')
1872 script.append(b'set -x\n')
1873 if os.getenv('MSYSTEM'):
1873 if os.getenv('MSYSTEM'):
1874 script.append(b'alias pwd="pwd -W"\n')
1874 script.append(b'alias pwd="pwd -W"\n')
1875
1875
1876 if hgcatapult and hgcatapult != os.devnull:
1876 if hgcatapult and hgcatapult != os.devnull:
1877 if PYTHON3:
1877 if PYTHON3:
1878 hgcatapult = hgcatapult.encode('utf8')
1878 hgcatapult = hgcatapult.encode('utf8')
1879 cataname = self.name.encode('utf8')
1879 cataname = self.name.encode('utf8')
1880 else:
1880 else:
1881 cataname = self.name
1881 cataname = self.name
1882
1882
1883 # Kludge: use a while loop to keep the pipe from getting
1883 # Kludge: use a while loop to keep the pipe from getting
1884 # closed by our echo commands. The still-running file gets
1884 # closed by our echo commands. The still-running file gets
1885 # reaped at the end of the script, which causes the while
1885 # reaped at the end of the script, which causes the while
1886 # loop to exit and closes the pipe. Sigh.
1886 # loop to exit and closes the pipe. Sigh.
1887 script.append(
1887 script.append(
1888 b'rtendtracing() {\n'
1888 b'rtendtracing() {\n'
1889 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1889 b' echo END %(session)s %(name)s >> %(catapult)s\n'
1890 b' rm -f "$TESTTMP/.still-running"\n'
1890 b' rm -f "$TESTTMP/.still-running"\n'
1891 b'}\n'
1891 b'}\n'
1892 b'trap "rtendtracing" 0\n'
1892 b'trap "rtendtracing" 0\n'
1893 b'touch "$TESTTMP/.still-running"\n'
1893 b'touch "$TESTTMP/.still-running"\n'
1894 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1894 b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
1895 b'> %(catapult)s &\n'
1895 b'> %(catapult)s &\n'
1896 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1896 b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
1897 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1897 b'echo START %(session)s %(name)s >> %(catapult)s\n'
1898 % {
1898 % {
1899 b'name': cataname,
1899 b'name': cataname,
1900 b'session': session,
1900 b'session': session,
1901 b'catapult': hgcatapult,
1901 b'catapult': hgcatapult,
1902 }
1902 }
1903 )
1903 )
1904
1904
1905 if self._case:
1905 if self._case:
1906 casestr = b'#'.join(self._case)
1906 casestr = b'#'.join(self._case)
1907 if isinstance(casestr, str):
1907 if isinstance(casestr, str):
1908 quoted = shellquote(casestr)
1908 quoted = shellquote(casestr)
1909 else:
1909 else:
1910 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1910 quoted = shellquote(casestr.decode('utf8')).encode('utf8')
1911 script.append(b'TESTCASE=%s\n' % quoted)
1911 script.append(b'TESTCASE=%s\n' % quoted)
1912 script.append(b'export TESTCASE\n')
1912 script.append(b'export TESTCASE\n')
1913
1913
1914 n = 0
1914 n = 0
1915 for n, l in enumerate(lines):
1915 for n, l in enumerate(lines):
1916 if not l.endswith(b'\n'):
1916 if not l.endswith(b'\n'):
1917 l += b'\n'
1917 l += b'\n'
1918 if l.startswith(b'#require'):
1918 if l.startswith(b'#require'):
1919 lsplit = l.split()
1919 lsplit = l.split()
1920 if len(lsplit) < 2 or lsplit[0] != b'#require':
1920 if len(lsplit) < 2 or lsplit[0] != b'#require':
1921 after.setdefault(pos, []).append(
1921 after.setdefault(pos, []).append(
1922 b' !!! invalid #require\n'
1922 b' !!! invalid #require\n'
1923 )
1923 )
1924 if not skipping:
1924 if not skipping:
1925 haveresult, message = self._hghave(lsplit[1:])
1925 haveresult, message = self._hghave(lsplit[1:])
1926 if not haveresult:
1926 if not haveresult:
1927 script = [b'echo "%s"\nexit 80\n' % message]
1927 script = [b'echo "%s"\nexit 80\n' % message]
1928 break
1928 break
1929 after.setdefault(pos, []).append(l)
1929 after.setdefault(pos, []).append(l)
1930 elif l.startswith(b'#if'):
1930 elif l.startswith(b'#if'):
1931 lsplit = l.split()
1931 lsplit = l.split()
1932 if len(lsplit) < 2 or lsplit[0] != b'#if':
1932 if len(lsplit) < 2 or lsplit[0] != b'#if':
1933 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1933 after.setdefault(pos, []).append(b' !!! invalid #if\n')
1934 if skipping is not None:
1934 if skipping is not None:
1935 after.setdefault(pos, []).append(b' !!! nested #if\n')
1935 after.setdefault(pos, []).append(b' !!! nested #if\n')
1936 skipping = not self._iftest(lsplit[1:])
1936 skipping = not self._iftest(lsplit[1:])
1937 after.setdefault(pos, []).append(l)
1937 after.setdefault(pos, []).append(l)
1938 elif l.startswith(b'#else'):
1938 elif l.startswith(b'#else'):
1939 if skipping is None:
1939 if skipping is None:
1940 after.setdefault(pos, []).append(b' !!! missing #if\n')
1940 after.setdefault(pos, []).append(b' !!! missing #if\n')
1941 skipping = not skipping
1941 skipping = not skipping
1942 after.setdefault(pos, []).append(l)
1942 after.setdefault(pos, []).append(l)
1943 elif l.startswith(b'#endif'):
1943 elif l.startswith(b'#endif'):
1944 if skipping is None:
1944 if skipping is None:
1945 after.setdefault(pos, []).append(b' !!! missing #if\n')
1945 after.setdefault(pos, []).append(b' !!! missing #if\n')
1946 skipping = None
1946 skipping = None
1947 after.setdefault(pos, []).append(l)
1947 after.setdefault(pos, []).append(l)
1948 elif skipping:
1948 elif skipping:
1949 after.setdefault(pos, []).append(l)
1949 after.setdefault(pos, []).append(l)
1950 elif l.startswith(b' >>> '): # python inlines
1950 elif l.startswith(b' >>> '): # python inlines
1951 after.setdefault(pos, []).append(l)
1951 after.setdefault(pos, []).append(l)
1952 prepos = pos
1952 prepos = pos
1953 pos = n
1953 pos = n
1954 if not inpython:
1954 if not inpython:
1955 # We've just entered a Python block. Add the header.
1955 # We've just entered a Python block. Add the header.
1956 inpython = True
1956 inpython = True
1957 addsalt(prepos, False) # Make sure we report the exit code.
1957 addsalt(prepos, False) # Make sure we report the exit code.
1958 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1958 script.append(b'"%s" -m heredoctest <<EOF\n' % PYTHON)
1959 addsalt(n, True)
1959 addsalt(n, True)
1960 script.append(l[2:])
1960 script.append(l[2:])
1961 elif l.startswith(b' ... '): # python inlines
1961 elif l.startswith(b' ... '): # python inlines
1962 after.setdefault(prepos, []).append(l)
1962 after.setdefault(prepos, []).append(l)
1963 script.append(l[2:])
1963 script.append(l[2:])
1964 elif l.startswith(b' $ '): # commands
1964 elif l.startswith(b' $ '): # commands
1965 if inpython:
1965 if inpython:
1966 script.append(b'EOF\n')
1966 script.append(b'EOF\n')
1967 inpython = False
1967 inpython = False
1968 after.setdefault(pos, []).append(l)
1968 after.setdefault(pos, []).append(l)
1969 prepos = pos
1969 prepos = pos
1970 pos = n
1970 pos = n
1971 addsalt(n, False)
1971 addsalt(n, False)
1972 rawcmd = l[4:]
1972 rawcmd = l[4:]
1973 cmd = rawcmd.split()
1973 cmd = rawcmd.split()
1974 toggletrace(rawcmd)
1974 toggletrace(rawcmd)
1975 if len(cmd) == 2 and cmd[0] == b'cd':
1975 if len(cmd) == 2 and cmd[0] == b'cd':
1976 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1976 rawcmd = b'cd %s || exit 1\n' % cmd[1]
1977 script.append(rawcmd)
1977 script.append(rawcmd)
1978 elif l.startswith(b' > '): # continuations
1978 elif l.startswith(b' > '): # continuations
1979 after.setdefault(prepos, []).append(l)
1979 after.setdefault(prepos, []).append(l)
1980 script.append(l[4:])
1980 script.append(l[4:])
1981 elif l.startswith(b' '): # results
1981 elif l.startswith(b' '): # results
1982 # Queue up a list of expected results.
1982 # Queue up a list of expected results.
1983 expected.setdefault(pos, []).append(l[2:])
1983 expected.setdefault(pos, []).append(l[2:])
1984 else:
1984 else:
1985 if inpython:
1985 if inpython:
1986 script.append(b'EOF\n')
1986 script.append(b'EOF\n')
1987 inpython = False
1987 inpython = False
1988 # Non-command/result. Queue up for merged output.
1988 # Non-command/result. Queue up for merged output.
1989 after.setdefault(pos, []).append(l)
1989 after.setdefault(pos, []).append(l)
1990
1990
1991 if inpython:
1991 if inpython:
1992 script.append(b'EOF\n')
1992 script.append(b'EOF\n')
1993 if skipping is not None:
1993 if skipping is not None:
1994 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1994 after.setdefault(pos, []).append(b' !!! missing #endif\n')
1995 addsalt(n + 1, False)
1995 addsalt(n + 1, False)
1996 # Need to end any current per-command trace
1996 # Need to end any current per-command trace
1997 if activetrace:
1997 if activetrace:
1998 toggletrace()
1998 toggletrace()
1999 return salt, script, after, expected
1999 return salt, script, after, expected
2000
2000
2001 def _processoutput(self, exitcode, output, salt, after, expected):
2001 def _processoutput(self, exitcode, output, salt, after, expected):
2002 # Merge the script output back into a unified test.
2002 # Merge the script output back into a unified test.
2003 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
2003 warnonly = WARN_UNDEFINED # 1: not yet; 2: yes; 3: for sure not
2004 if exitcode != 0:
2004 if exitcode != 0:
2005 warnonly = WARN_NO
2005 warnonly = WARN_NO
2006
2006
2007 pos = -1
2007 pos = -1
2008 postout = []
2008 postout = []
2009 for out_rawline in output:
2009 for out_rawline in output:
2010 out_line, cmd_line = out_rawline, None
2010 out_line, cmd_line = out_rawline, None
2011 if salt in out_rawline:
2011 if salt in out_rawline:
2012 out_line, cmd_line = out_rawline.split(salt, 1)
2012 out_line, cmd_line = out_rawline.split(salt, 1)
2013
2013
2014 pos, postout, warnonly = self._process_out_line(
2014 pos, postout, warnonly = self._process_out_line(
2015 out_line, pos, postout, expected, warnonly
2015 out_line, pos, postout, expected, warnonly
2016 )
2016 )
2017 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
2017 pos, postout = self._process_cmd_line(cmd_line, pos, postout, after)
2018
2018
2019 if pos in after:
2019 if pos in after:
2020 postout += after.pop(pos)
2020 postout += after.pop(pos)
2021
2021
2022 if warnonly == WARN_YES:
2022 if warnonly == WARN_YES:
2023 exitcode = False # Set exitcode to warned.
2023 exitcode = False # Set exitcode to warned.
2024
2024
2025 return exitcode, postout
2025 return exitcode, postout
2026
2026
2027 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
2027 def _process_out_line(self, out_line, pos, postout, expected, warnonly):
2028 while out_line:
2028 while out_line:
2029 if not out_line.endswith(b'\n'):
2029 if not out_line.endswith(b'\n'):
2030 out_line += b' (no-eol)\n'
2030 out_line += b' (no-eol)\n'
2031
2031
2032 # Find the expected output at the current position.
2032 # Find the expected output at the current position.
2033 els = [None]
2033 els = [None]
2034 if expected.get(pos, None):
2034 if expected.get(pos, None):
2035 els = expected[pos]
2035 els = expected[pos]
2036
2036
2037 optional = []
2037 optional = []
2038 for i, el in enumerate(els):
2038 for i, el in enumerate(els):
2039 r = False
2039 r = False
2040 if el:
2040 if el:
2041 r, exact = self.linematch(el, out_line)
2041 r, exact = self.linematch(el, out_line)
2042 if isinstance(r, str):
2042 if isinstance(r, str):
2043 if r == '-glob':
2043 if r == '-glob':
2044 out_line = ''.join(el.rsplit(' (glob)', 1))
2044 out_line = ''.join(el.rsplit(' (glob)', 1))
2045 r = '' # Warn only this line.
2045 r = '' # Warn only this line.
2046 elif r == "retry":
2046 elif r == "retry":
2047 postout.append(b' ' + el)
2047 postout.append(b' ' + el)
2048 else:
2048 else:
2049 log('\ninfo, unknown linematch result: %r\n' % r)
2049 log('\ninfo, unknown linematch result: %r\n' % r)
2050 r = False
2050 r = False
2051 if r:
2051 if r:
2052 els.pop(i)
2052 els.pop(i)
2053 break
2053 break
2054 if el:
2054 if el:
2055 if isoptional(el):
2055 if isoptional(el):
2056 optional.append(i)
2056 optional.append(i)
2057 else:
2057 else:
2058 m = optline.match(el)
2058 m = optline.match(el)
2059 if m:
2059 if m:
2060 conditions = [c for c in m.group(2).split(b' ')]
2060 conditions = [c for c in m.group(2).split(b' ')]
2061
2061
2062 if not self._iftest(conditions):
2062 if not self._iftest(conditions):
2063 optional.append(i)
2063 optional.append(i)
2064 if exact:
2064 if exact:
2065 # Don't allow the line to be matched against a later
2065 # Don't allow the line to be matched against a later
2066 # line in the output
2066 # line in the output
2067 els.pop(i)
2067 els.pop(i)
2068 break
2068 break
2069
2069
2070 if r:
2070 if r:
2071 if r == "retry":
2071 if r == "retry":
2072 continue
2072 continue
2073 # clean up any optional leftovers
2073 # clean up any optional leftovers
2074 for i in optional:
2074 for i in optional:
2075 postout.append(b' ' + els[i])
2075 postout.append(b' ' + els[i])
2076 for i in reversed(optional):
2076 for i in reversed(optional):
2077 del els[i]
2077 del els[i]
2078 postout.append(b' ' + el)
2078 postout.append(b' ' + el)
2079 else:
2079 else:
2080 if self.NEEDESCAPE(out_line):
2080 if self.NEEDESCAPE(out_line):
2081 out_line = TTest._stringescape(
2081 out_line = TTest._stringescape(
2082 b'%s (esc)\n' % out_line.rstrip(b'\n')
2082 b'%s (esc)\n' % out_line.rstrip(b'\n')
2083 )
2083 )
2084 postout.append(b' ' + out_line) # Let diff deal with it.
2084 postout.append(b' ' + out_line) # Let diff deal with it.
2085 if r != '': # If line failed.
2085 if r != '': # If line failed.
2086 warnonly = WARN_NO
2086 warnonly = WARN_NO
2087 elif warnonly == WARN_UNDEFINED:
2087 elif warnonly == WARN_UNDEFINED:
2088 warnonly = WARN_YES
2088 warnonly = WARN_YES
2089 break
2089 break
2090 else:
2090 else:
2091 # clean up any optional leftovers
2091 # clean up any optional leftovers
2092 while expected.get(pos, None):
2092 while expected.get(pos, None):
2093 el = expected[pos].pop(0)
2093 el = expected[pos].pop(0)
2094 if el:
2094 if el:
2095 if not isoptional(el):
2095 if not isoptional(el):
2096 m = optline.match(el)
2096 m = optline.match(el)
2097 if m:
2097 if m:
2098 conditions = [c for c in m.group(2).split(b' ')]
2098 conditions = [c for c in m.group(2).split(b' ')]
2099
2099
2100 if self._iftest(conditions):
2100 if self._iftest(conditions):
2101 # Don't append as optional line
2101 # Don't append as optional line
2102 continue
2102 continue
2103 else:
2103 else:
2104 continue
2104 continue
2105 postout.append(b' ' + el)
2105 postout.append(b' ' + el)
2106 return pos, postout, warnonly
2106 return pos, postout, warnonly
2107
2107
2108 def _process_cmd_line(self, cmd_line, pos, postout, after):
2108 def _process_cmd_line(self, cmd_line, pos, postout, after):
2109 """process a "command" part of a line from unified test output"""
2109 """process a "command" part of a line from unified test output"""
2110 if cmd_line:
2110 if cmd_line:
2111 # Add on last return code.
2111 # Add on last return code.
2112 ret = int(cmd_line.split()[1])
2112 ret = int(cmd_line.split()[1])
2113 if ret != 0:
2113 if ret != 0:
2114 postout.append(b' [%d]\n' % ret)
2114 postout.append(b' [%d]\n' % ret)
2115 if pos in after:
2115 if pos in after:
2116 # Merge in non-active test bits.
2116 # Merge in non-active test bits.
2117 postout += after.pop(pos)
2117 postout += after.pop(pos)
2118 pos = int(cmd_line.split()[0])
2118 pos = int(cmd_line.split()[0])
2119 return pos, postout
2119 return pos, postout
2120
2120
2121 @staticmethod
2121 @staticmethod
2122 def rematch(el, l):
2122 def rematch(el, l):
2123 try:
2123 try:
2124 # parse any flags at the beginning of the regex. Only 'i' is
2124 # parse any flags at the beginning of the regex. Only 'i' is
2125 # supported right now, but this should be easy to extend.
2125 # supported right now, but this should be easy to extend.
2126 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2126 flags, el = re.match(br'^(\(\?i\))?(.*)', el).groups()[0:2]
2127 flags = flags or b''
2127 flags = flags or b''
2128 el = flags + b'(?:' + el + b')'
2128 el = flags + b'(?:' + el + b')'
2129 # use \Z to ensure that the regex matches to the end of the string
2129 # use \Z to ensure that the regex matches to the end of the string
2130 if WINDOWS:
2130 if WINDOWS:
2131 return re.match(el + br'\r?\n\Z', l)
2131 return re.match(el + br'\r?\n\Z', l)
2132 return re.match(el + br'\n\Z', l)
2132 return re.match(el + br'\n\Z', l)
2133 except re.error:
2133 except re.error:
2134 # el is an invalid regex
2134 # el is an invalid regex
2135 return False
2135 return False
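# For example, an expected line written as '(?i)warning: .* (re)'
# reaches this function as b'(?i)warning: .*'; it is rewritten to
# b'(?i)(?:warning: .*)' and anchored with \n\Z (allowing \r?\n on
# Windows), so the 'i' flag covers the whole expression and the match
# must consume the entire output line.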
2136
2136
2137 @staticmethod
2137 @staticmethod
2138 def globmatch(el, l):
2138 def globmatch(el, l):
2139 # The only supported special characters are * and ? plus /, which also
2139 # The only supported special characters are * and ? plus /, which also
2140 # matches \ on Windows. Escaping of these characters is supported.
2140 # matches \ on Windows. Escaping of these characters is supported.
2141 if el + b'\n' == l:
2141 if el + b'\n' == l:
2142 if os.altsep:
2142 if os.altsep:
2143 # matching on "/" is not needed for this line
2143 # matching on "/" is not needed for this line
2144 for pat in checkcodeglobpats:
2144 for pat in checkcodeglobpats:
2145 if pat.match(el):
2145 if pat.match(el):
2146 return True
2146 return True
2147 return b'-glob'
2147 return b'-glob'
2148 return True
2148 return True
2149 el = el.replace(b'$LOCALIP', b'*')
2149 el = el.replace(b'$LOCALIP', b'*')
2150 i, n = 0, len(el)
2150 i, n = 0, len(el)
2151 res = b''
2151 res = b''
2152 while i < n:
2152 while i < n:
2153 c = el[i : i + 1]
2153 c = el[i : i + 1]
2154 i += 1
2154 i += 1
2155 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2155 if c == b'\\' and i < n and el[i : i + 1] in b'*?\\/':
2156 res += el[i - 1 : i + 1]
2156 res += el[i - 1 : i + 1]
2157 i += 1
2157 i += 1
2158 elif c == b'*':
2158 elif c == b'*':
2159 res += b'.*'
2159 res += b'.*'
2160 elif c == b'?':
2160 elif c == b'?':
2161 res += b'.'
2161 res += b'.'
2162 elif c == b'/' and os.altsep:
2162 elif c == b'/' and os.altsep:
2163 res += b'[/\\\\]'
2163 res += b'[/\\\\]'
2164 else:
2164 else:
2165 res += re.escape(c)
2165 res += re.escape(c)
2166 return TTest.rematch(res, l)
2166 return TTest.rematch(res, l)
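# In the translation above, '*' becomes '.*', '?' becomes '.', and
# where os.altsep is set '/' becomes a character class matching either
# path separator; '$LOCALIP' is widened to '*' and everything else is
# re.escape()d. The resulting pattern is then handed to rematch(), so
# a glob like 'saved backup bundle to $TESTTMP/*.hg (glob)' tolerates
# backslash path separators on Windows.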
2167
2167
2168 def linematch(self, el, l):
2168 def linematch(self, el, l):
2169 if el == l: # perfect match (fast)
2169 if el == l: # perfect match (fast)
2170 return True, True
2170 return True, True
2171 retry = False
2171 retry = False
2172 if isoptional(el):
2172 if isoptional(el):
2173 retry = "retry"
2173 retry = "retry"
2174 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2174 el = el[: -len(MARK_OPTIONAL)] + b"\n"
2175 else:
2175 else:
2176 m = optline.match(el)
2176 m = optline.match(el)
2177 if m:
2177 if m:
2178 conditions = [c for c in m.group(2).split(b' ')]
2178 conditions = [c for c in m.group(2).split(b' ')]
2179
2179
2180 el = m.group(1) + b"\n"
2180 el = m.group(1) + b"\n"
2181 if not self._iftest(conditions):
2181 if not self._iftest(conditions):
2182 # listed feature missing, should not match
2182 # listed feature missing, should not match
2183 return "retry", False
2183 return "retry", False
2184
2184
2185 if el.endswith(b" (esc)\n"):
2185 if el.endswith(b" (esc)\n"):
2186 if PYTHON3:
2186 if PYTHON3:
2187 el = el[:-7].decode('unicode_escape') + '\n'
2187 el = el[:-7].decode('unicode_escape') + '\n'
2188 el = el.encode('latin-1')
2188 el = el.encode('latin-1')
2189 else:
2189 else:
2190 el = el[:-7].decode('string-escape') + '\n'
2190 el = el[:-7].decode('string-escape') + '\n'
2191 if el == l or WINDOWS and el[:-1] + b'\r\n' == l:
2191 if el == l or WINDOWS and el[:-1] + b'\r\n' == l:
2192 return True, True
2192 return True, True
2193 if el.endswith(b" (re)\n"):
2193 if el.endswith(b" (re)\n"):
2194 return (TTest.rematch(el[:-6], l) or retry), False
2194 return (TTest.rematch(el[:-6], l) or retry), False
2195 if el.endswith(b" (glob)\n"):
2195 if el.endswith(b" (glob)\n"):
2196 # ignore '(glob)' added to l by 'replacements'
2196 # ignore '(glob)' added to l by 'replacements'
2197 if l.endswith(b" (glob)\n"):
2197 if l.endswith(b" (glob)\n"):
2198 l = l[:-8] + b"\n"
2198 l = l[:-8] + b"\n"
2199 return (TTest.globmatch(el[:-8], l) or retry), False
2199 return (TTest.globmatch(el[:-8], l) or retry), False
2200 if os.altsep:
2200 if os.altsep:
2201 _l = l.replace(b'\\', b'/')
2201 _l = l.replace(b'\\', b'/')
2202 if el == _l or WINDOWS and el[:-1] + b'\r\n' == _l:
2202 if el == _l or WINDOWS and el[:-1] + b'\r\n' == _l:
2203 return True, True
2203 return True, True
2204 return retry, True
2204 return retry, True
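# Summary of the contract used by _process_out_line(): the first
# element of the returned pair is truthy on a hit (True, a regex match
# object, or the sentinel "retry" for optional lines to try again
# against later output), and the second says whether the comparison was
# exact, in which case the expected line is consumed even if it did not
# match, so it cannot be matched against a later output line.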
2205
2205
2206 @staticmethod
2206 @staticmethod
2207 def parsehghaveoutput(lines):
2207 def parsehghaveoutput(lines):
2208 """Parse hghave log lines.
2208 """Parse hghave log lines.
2209
2209
2210 Return tuple of lists (missing, failed):
2210 Return tuple of lists (missing, failed):
2211 * the missing/unknown features
2211 * the missing/unknown features
2212 * the features for which the existence check failed"""
2212 * the features for which the existence check failed"""
2213 missing = []
2213 missing = []
2214 failed = []
2214 failed = []
2215 for line in lines:
2215 for line in lines:
2216 if line.startswith(TTest.SKIPPED_PREFIX):
2216 if line.startswith(TTest.SKIPPED_PREFIX):
2217 line = line.splitlines()[0]
2217 line = line.splitlines()[0]
2218 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2218 missing.append(_bytes2sys(line[len(TTest.SKIPPED_PREFIX) :]))
2219 elif line.startswith(TTest.FAILED_PREFIX):
2219 elif line.startswith(TTest.FAILED_PREFIX):
2220 line = line.splitlines()[0]
2220 line = line.splitlines()[0]
2221 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2221 failed.append(_bytes2sys(line[len(TTest.FAILED_PREFIX) :]))
2222
2222
2223 return missing, failed
2223 return missing, failed
2224
2224
2225 @staticmethod
2225 @staticmethod
2226 def _escapef(m):
2226 def _escapef(m):
2227 return TTest.ESCAPEMAP[m.group(0)]
2227 return TTest.ESCAPEMAP[m.group(0)]
2228
2228
2229 @staticmethod
2229 @staticmethod
2230 def _stringescape(s):
2230 def _stringescape(s):
2231 return TTest.ESCAPESUB(TTest._escapef, s)
2231 return TTest.ESCAPESUB(TTest._escapef, s)
2232
2232
2233
2233
2234 iolock = threading.RLock()
2234 iolock = threading.RLock()
2235 firstlock = threading.RLock()
2235 firstlock = threading.RLock()
2236 firsterror = False
2236 firsterror = False
2237
2237
2238 if PYTHON3:
2238 if PYTHON3:
2239 base_class = unittest.TextTestResult
2239 base_class = unittest.TextTestResult
2240 else:
2240 else:
2241 base_class = unittest._TextTestResult
2241 base_class = unittest._TextTestResult
2242
2242
2243
2243
2244 class TestResult(base_class):
2244 class TestResult(base_class):
2245 """Holds results when executing via unittest."""
2245 """Holds results when executing via unittest."""
2246
2246
2247 def __init__(self, options, *args, **kwargs):
2247 def __init__(self, options, *args, **kwargs):
2248 super(TestResult, self).__init__(*args, **kwargs)
2248 super(TestResult, self).__init__(*args, **kwargs)
2249
2249
2250 self._options = options
2250 self._options = options
2251
2251
2252 # unittest.TestResult didn't have skipped until 2.7. We need to
2252 # unittest.TestResult didn't have skipped until 2.7. We need to
2253 # polyfill it.
2253 # polyfill it.
2254 self.skipped = []
2254 self.skipped = []
2255
2255
2256 # We have a custom "ignored" result that isn't present in any Python
2256 # We have a custom "ignored" result that isn't present in any Python
2257 # unittest implementation. It is very similar to skipped. It may make
2257 # unittest implementation. It is very similar to skipped. It may make
2258 # sense to map it into skip some day.
2258 # sense to map it into skip some day.
2259 self.ignored = []
2259 self.ignored = []
2260
2260
2261 self.times = []
2261 self.times = []
2262 self._firststarttime = None
2262 self._firststarttime = None
2263 # Data stored for the benefit of generating xunit reports.
2263 # Data stored for the benefit of generating xunit reports.
2264 self.successes = []
2264 self.successes = []
2265 self.faildata = {}
2265 self.faildata = {}
2266
2266
2267 if options.color == 'auto':
2267 if options.color == 'auto':
2268 isatty = self.stream.isatty()
2268 isatty = self.stream.isatty()
2269 # For some reason, redirecting stdout on Windows disables the ANSI
2269 # For some reason, redirecting stdout on Windows disables the ANSI
2270 # color processing of stderr, which is what is used to print the
2270 # color processing of stderr, which is what is used to print the
2271 # output. Therefore, both must be tty on Windows to enable color.
2271 # output. Therefore, both must be tty on Windows to enable color.
2272 if WINDOWS:
2272 if WINDOWS:
2273 isatty = isatty and sys.stdout.isatty()
2273 isatty = isatty and sys.stdout.isatty()
2274 self.color = pygmentspresent and isatty
2274 self.color = pygmentspresent and isatty
2275 elif options.color == 'never':
2275 elif options.color == 'never':
2276 self.color = False
2276 self.color = False
2277 else: # 'always', for testing purposes
2277 else: # 'always', for testing purposes
2278 self.color = pygmentspresent
2278 self.color = pygmentspresent
2279
2279
2280 def onStart(self, test):
2280 def onStart(self, test):
2281 """Can be overriden by custom TestResult"""
2281 """Can be overriden by custom TestResult"""
2282
2282
2283 def onEnd(self):
2283 def onEnd(self):
2284 """Can be overriden by custom TestResult"""
2284 """Can be overriden by custom TestResult"""
2285
2285
2286 def addFailure(self, test, reason):
2286 def addFailure(self, test, reason):
2287 self.failures.append((test, reason))
2287 self.failures.append((test, reason))
2288
2288
2289 if self._options.first:
2289 if self._options.first:
2290 self.stop()
2290 self.stop()
2291 else:
2291 else:
2292 with iolock:
2292 with iolock:
2293 if reason == "timed out":
2293 if reason == "timed out":
2294 self.stream.write('t')
2294 self.stream.write('t')
2295 else:
2295 else:
2296 if not self._options.nodiff:
2296 if not self._options.nodiff:
2297 self.stream.write('\n')
2297 self.stream.write('\n')
2298 # Exclude the '\n' from highlighting to lex correctly
2298 # Exclude the '\n' from highlighting to lex correctly
2299 formatted = 'ERROR: %s output changed\n' % test
2299 formatted = 'ERROR: %s output changed\n' % test
2300 self.stream.write(highlightmsg(formatted, self.color))
2300 self.stream.write(highlightmsg(formatted, self.color))
2301 self.stream.write('!')
2301 self.stream.write('!')
2302
2302
2303 self.stream.flush()
2303 self.stream.flush()
2304
2304
2305 def addSuccess(self, test):
2305 def addSuccess(self, test):
2306 with iolock:
2306 with iolock:
2307 super(TestResult, self).addSuccess(test)
2307 super(TestResult, self).addSuccess(test)
2308 self.successes.append(test)
2308 self.successes.append(test)
2309
2309
2310 def addError(self, test, err):
2310 def addError(self, test, err):
2311 super(TestResult, self).addError(test, err)
2311 super(TestResult, self).addError(test, err)
2312 if self._options.first:
2312 if self._options.first:
2313 self.stop()
2313 self.stop()
2314
2314
2315 # Polyfill.
2315 # Polyfill.
2316 def addSkip(self, test, reason):
2316 def addSkip(self, test, reason):
2317 self.skipped.append((test, reason))
2317 self.skipped.append((test, reason))
2318 with iolock:
2318 with iolock:
2319 if self.showAll:
2319 if self.showAll:
2320 self.stream.writeln('skipped %s' % reason)
2320 self.stream.writeln('skipped %s' % reason)
2321 else:
2321 else:
2322 self.stream.write('s')
2322 self.stream.write('s')
2323 self.stream.flush()
2323 self.stream.flush()
2324
2324
2325 def addIgnore(self, test, reason):
2325 def addIgnore(self, test, reason):
2326 self.ignored.append((test, reason))
2326 self.ignored.append((test, reason))
2327 with iolock:
2327 with iolock:
2328 if self.showAll:
2328 if self.showAll:
2329 self.stream.writeln('ignored %s' % reason)
2329 self.stream.writeln('ignored %s' % reason)
2330 else:
2330 else:
2331 if reason not in ('not retesting', "doesn't match keyword"):
2331 if reason not in ('not retesting', "doesn't match keyword"):
2332 self.stream.write('i')
2332 self.stream.write('i')
2333 else:
2333 else:
2334 self.testsRun += 1
2334 self.testsRun += 1
2335 self.stream.flush()
2335 self.stream.flush()
2336
2336
2337 def addOutputMismatch(self, test, ret, got, expected):
2337 def addOutputMismatch(self, test, ret, got, expected):
2338 """Record a mismatch in test output for a particular test."""
2338 """Record a mismatch in test output for a particular test."""
2339 if self.shouldStop or firsterror:
2339 if self.shouldStop or firsterror:
2340 # Don't print; some other test case already failed and
2340 # Don't print; some other test case already failed and
2341 # printed. We're just stale and probably failed due to our
2341 # printed. We're just stale and probably failed due to our
2342 # temp dir getting cleaned up.
2342 # temp dir getting cleaned up.
2343 return
2343 return
2344
2344
2345 accepted = False
2345 accepted = False
2346 lines = []
2346 lines = []
2347
2347
2348 with iolock:
2348 with iolock:
2349 if self._options.nodiff:
2349 if self._options.nodiff:
2350 pass
2350 pass
2351 elif self._options.view:
2351 elif self._options.view:
2352 v = self._options.view
2352 v = self._options.view
2353 subprocess.call(
2353 subprocess.call(
2354 r'"%s" "%s" "%s"'
2354 r'"%s" "%s" "%s"'
2355 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2355 % (v, _bytes2sys(test.refpath), _bytes2sys(test.errpath)),
2356 shell=True,
2356 shell=True,
2357 )
2357 )
2358 else:
2358 else:
2359 servefail, lines = getdiff(
2359 servefail, lines = getdiff(
2360 expected, got, test.refpath, test.errpath
2360 expected, got, test.refpath, test.errpath
2361 )
2361 )
2362 self.stream.write('\n')
2362 self.stream.write('\n')
2363 for line in lines:
2363 for line in lines:
2364 line = highlightdiff(line, self.color)
2364 line = highlightdiff(line, self.color)
2365 if PYTHON3:
2365 if PYTHON3:
2366 self.stream.flush()
2366 self.stream.flush()
2367 self.stream.buffer.write(line)
2367 self.stream.buffer.write(line)
2368 self.stream.buffer.flush()
2368 self.stream.buffer.flush()
2369 else:
2369 else:
2370 self.stream.write(line)
2370 self.stream.write(line)
2371 self.stream.flush()
2371 self.stream.flush()
2372
2372
2373 if servefail:
2373 if servefail:
2374 raise test.failureException(
2374 raise test.failureException(
2375 'server failed to start (HGPORT=%s)' % test._startport
2375 'server failed to start (HGPORT=%s)' % test._startport
2376 )
2376 )
2377
2377
2378 # handle interactive prompt without releasing iolock
2378 # handle interactive prompt without releasing iolock
2379 if self._options.interactive:
2379 if self._options.interactive:
2380 if test.readrefout() != expected:
2380 if test.readrefout() != expected:
2381 self.stream.write(
2381 self.stream.write(
2382 'Reference output has changed (run again to prompt '
2382 'Reference output has changed (run again to prompt '
2383 'changes)'
2383 'changes)'
2384 )
2384 )
2385 else:
2385 else:
2386 self.stream.write('Accept this change? [y/N] ')
2386 self.stream.write('Accept this change? [y/N] ')
2387 self.stream.flush()
2387 self.stream.flush()
2388 answer = sys.stdin.readline().strip()
2388 answer = sys.stdin.readline().strip()
2389 if answer.lower() in ('y', 'yes'):
2389 if answer.lower() in ('y', 'yes'):
2390 if test.path.endswith(b'.t'):
2390 if test.path.endswith(b'.t'):
2391 rename(test.errpath, test.path)
2391 rename(test.errpath, test.path)
2392 else:
2392 else:
2393 rename(test.errpath, b'%s.out' % test.path)
2393 rename(test.errpath, b'%s.out' % test.path)
2394 accepted = True
2394 accepted = True
2395 if not accepted:
2395 if not accepted:
2396 self.faildata[test.name] = b''.join(lines)
2396 self.faildata[test.name] = b''.join(lines)
2397
2397
2398 return accepted
2398 return accepted
2399
2399
2400 def startTest(self, test):
2400 def startTest(self, test):
2401 super(TestResult, self).startTest(test)
2401 super(TestResult, self).startTest(test)
2402
2402
2403 # os.times() computes the user time and system time spent by
2403 # os.times() computes the user time and system time spent by
2404 # child processes along with the real elapsed time taken by a process.
2404 # child processes along with the real elapsed time taken by a process.
2405 # It has one limitation: it only works for Linux users and
2405 # It has one limitation: it only works for Linux users and
2406 # not for Windows, which is why we fall back to another function
2406 # not for Windows, which is why we fall back to another function
2407 # for wall time calculations.
2407 # for wall time calculations.
2408 test.started_times = os.times()
2408 test.started_times = os.times()
2409 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2409 # TODO use a monotonic clock once support for Python 2.7 is dropped.
2410 test.started_time = time.time()
2410 test.started_time = time.time()
2411 if self._firststarttime is None: # thread racy but irrelevant
2411 if self._firststarttime is None: # thread racy but irrelevant
2412 self._firststarttime = test.started_time
2412 self._firststarttime = test.started_time
2413
2413
2414 def stopTest(self, test, interrupted=False):
2414 def stopTest(self, test, interrupted=False):
2415 super(TestResult, self).stopTest(test)
2415 super(TestResult, self).stopTest(test)
2416
2416
2417 test.stopped_times = os.times()
2417 test.stopped_times = os.times()
2418 stopped_time = time.time()
2418 stopped_time = time.time()
2419
2419
2420 starttime = test.started_times
2420 starttime = test.started_times
2421 endtime = test.stopped_times
2421 endtime = test.stopped_times
2422 origin = self._firststarttime
2422 origin = self._firststarttime
2423 self.times.append(
2423 self.times.append(
2424 (
2424 (
2425 test.name,
2425 test.name,
2426 endtime[2] - starttime[2], # user space CPU time
2426 endtime[2] - starttime[2], # user space CPU time
2427 endtime[3] - starttime[3], # sys space CPU time
2427 endtime[3] - starttime[3], # sys space CPU time
2428 stopped_time - test.started_time, # real time
2428 stopped_time - test.started_time, # real time
2429 test.started_time - origin, # start date in run context
2429 test.started_time - origin, # start date in run context
2430 stopped_time - origin, # end date in run context
2430 stopped_time - origin, # end date in run context
2431 )
2431 )
2432 )
2432 )
2433
2433
2434 if interrupted:
2434 if interrupted:
2435 with iolock:
2435 with iolock:
2436 self.stream.writeln(
2436 self.stream.writeln(
2437 'INTERRUPTED: %s (after %d seconds)'
2437 'INTERRUPTED: %s (after %d seconds)'
2438 % (test.name, self.times[-1][3])
2438 % (test.name, self.times[-1][3])
2439 )
2439 )
2440
2440
2441
2441
2442 def getTestResult():
2442 def getTestResult():
2443 """
2443 """
2444 Returns the relevant test result
2444 Returns the relevant test result
2445 """
2445 """
2446 if "CUSTOM_TEST_RESULT" in os.environ:
2446 if "CUSTOM_TEST_RESULT" in os.environ:
2447 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2447 testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
2448 return testresultmodule.TestResult
2448 return testresultmodule.TestResult
2449 else:
2449 else:
2450 return TestResult
2450 return TestResult
2451
2451
2452
2452
2453 class TestSuite(unittest.TestSuite):
2453 class TestSuite(unittest.TestSuite):
2454 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2454 """Custom unittest TestSuite that knows how to execute Mercurial tests."""
2455
2455
2456 def __init__(
2456 def __init__(
2457 self,
2457 self,
2458 testdir,
2458 testdir,
2459 jobs=1,
2459 jobs=1,
2460 whitelist=None,
2460 whitelist=None,
2461 blacklist=None,
2461 blacklist=None,
2462 keywords=None,
2462 keywords=None,
2463 loop=False,
2463 loop=False,
2464 runs_per_test=1,
2464 runs_per_test=1,
2465 loadtest=None,
2465 loadtest=None,
2466 showchannels=False,
2466 showchannels=False,
2467 *args,
2467 *args,
2468 **kwargs
2468 **kwargs
2469 ):
2469 ):
2470 """Create a new instance that can run tests with a configuration.
2470 """Create a new instance that can run tests with a configuration.
2471
2471
2472 testdir specifies the directory from which tests are executed. This
2472 testdir specifies the directory from which tests are executed. This
2473 is typically the ``tests`` directory from Mercurial's source
2473 is typically the ``tests`` directory from Mercurial's source
2474 repository.
2474 repository.
2475
2475
2476 jobs specifies the number of jobs to run concurrently. Each test
2476 jobs specifies the number of jobs to run concurrently. Each test
2477 executes on its own thread. Tests actually spawn new processes, so
2477 executes on its own thread. Tests actually spawn new processes, so
2478 state mutation should not be an issue.
2478 state mutation should not be an issue.
2479
2479
2480 If there is only one job, it will use the main thread.
2480 If there is only one job, it will use the main thread.
2481
2481
2482 whitelist and blacklist denote tests that have been whitelisted and
2482 whitelist and blacklist denote tests that have been whitelisted and
2483 blacklisted, respectively. These arguments don't belong in TestSuite.
2483 blacklisted, respectively. These arguments don't belong in TestSuite.
2484 Instead, whitelist and blacklist should be handled by the thing that
2484 Instead, whitelist and blacklist should be handled by the thing that
2485 populates the TestSuite with tests. They are present to preserve
2485 populates the TestSuite with tests. They are present to preserve
2486 backwards-compatible behavior, which reports skipped tests as part
2486 backwards-compatible behavior, which reports skipped tests as part
2487 of the results.
2487 of the results.
2488
2488
2489 keywords denotes the keywords used to filter which tests
2489 keywords denotes the keywords used to filter which tests
2490 to execute. This arguably belongs outside of TestSuite.
2490 to execute. This arguably belongs outside of TestSuite.
2491
2491
2492 loop denotes whether to loop over tests forever.
2492 loop denotes whether to loop over tests forever.
2493 """
2493 """
2494 super(TestSuite, self).__init__(*args, **kwargs)
2494 super(TestSuite, self).__init__(*args, **kwargs)
2495
2495
2496 self._jobs = jobs
2496 self._jobs = jobs
2497 self._whitelist = whitelist
2497 self._whitelist = whitelist
2498 self._blacklist = blacklist
2498 self._blacklist = blacklist
2499 self._keywords = keywords
2499 self._keywords = keywords
2500 self._loop = loop
2500 self._loop = loop
2501 self._runs_per_test = runs_per_test
2501 self._runs_per_test = runs_per_test
2502 self._loadtest = loadtest
2502 self._loadtest = loadtest
2503 self._showchannels = showchannels
2503 self._showchannels = showchannels
2504
2504
2505 def run(self, result):
2505 def run(self, result):
2506 # We have a number of filters that need to be applied. We do this
2506 # We have a number of filters that need to be applied. We do this
2507 # here instead of inside Test because it makes the running logic for
2507 # here instead of inside Test because it makes the running logic for
2508 # Test simpler.
2508 # Test simpler.
2509 tests = []
2509 tests = []
2510 num_tests = [0]
2510 num_tests = [0]
2511 for test in self._tests:
2511 for test in self._tests:
2512
2512
2513 def get():
2513 def get():
2514 num_tests[0] += 1
2514 num_tests[0] += 1
2515 if getattr(test, 'should_reload', False):
2515 if getattr(test, 'should_reload', False):
2516 return self._loadtest(test, num_tests[0])
2516 return self._loadtest(test, num_tests[0])
2517 return test
2517 return test
2518
2518
2519 if not os.path.exists(test.path):
2519 if not os.path.exists(test.path):
2520 result.addSkip(test, "Doesn't exist")
2520 result.addSkip(test, "Doesn't exist")
2521 continue
2521 continue
2522
2522
2523 is_whitelisted = self._whitelist and (
2523 is_whitelisted = self._whitelist and (
2524 test.relpath in self._whitelist or test.bname in self._whitelist
2524 test.relpath in self._whitelist or test.bname in self._whitelist
2525 )
2525 )
2526 if not is_whitelisted:
2526 if not is_whitelisted:
2527 is_blacklisted = self._blacklist and (
2527 is_blacklisted = self._blacklist and (
2528 test.relpath in self._blacklist
2528 test.relpath in self._blacklist
2529 or test.bname in self._blacklist
2529 or test.bname in self._blacklist
2530 )
2530 )
2531 if is_blacklisted:
2531 if is_blacklisted:
2532 result.addSkip(test, 'blacklisted')
2532 result.addSkip(test, 'blacklisted')
2533 continue
2533 continue
2534 if self._keywords:
2534 if self._keywords:
2535 with open(test.path, 'rb') as f:
2535 with open(test.path, 'rb') as f:
2536 t = f.read().lower() + test.bname.lower()
2536 t = f.read().lower() + test.bname.lower()
2537 ignored = False
2537 ignored = False
2538 for k in self._keywords.lower().split():
2538 for k in self._keywords.lower().split():
2539 if k not in t:
2539 if k not in t:
2540 result.addIgnore(test, "doesn't match keyword")
2540 result.addIgnore(test, "doesn't match keyword")
2541 ignored = True
2541 ignored = True
2542 break
2542 break
2543
2543
2544 if ignored:
2544 if ignored:
2545 continue
2545 continue
2546 for _ in xrange(self._runs_per_test):
2546 for _ in xrange(self._runs_per_test):
2547 tests.append(get())
2547 tests.append(get())
2548
2548
2549 runtests = list(tests)
2549 runtests = list(tests)
2550 done = queue.Queue()
2550 done = queue.Queue()
2551 running = 0
2551 running = 0
2552
2552
2553 channels = [""] * self._jobs
2553 channels = [""] * self._jobs
2554
2554
2555 def job(test, result):
2555 def job(test, result):
2556 for n, v in enumerate(channels):
2556 for n, v in enumerate(channels):
2557 if not v:
2557 if not v:
2558 channel = n
2558 channel = n
2559 break
2559 break
2560 else:
2560 else:
2561 raise ValueError('Could not find output channel')
2561 raise ValueError('Could not find output channel')
2562 channels[channel] = "=" + test.name[5:].split(".")[0]
2562 channels[channel] = "=" + test.name[5:].split(".")[0]
2563 try:
2563 try:
2564 test(result)
2564 test(result)
2565 done.put(None)
2565 done.put(None)
2566 except KeyboardInterrupt:
2566 except KeyboardInterrupt:
2567 pass
2567 pass
2568 except: # re-raises
2568 except: # re-raises
2569 done.put(('!', test, 'run-test raised an error, see traceback'))
2569 done.put(('!', test, 'run-test raised an error, see traceback'))
2570 raise
2570 raise
2571 finally:
2571 finally:
2572 try:
2572 try:
2573 channels[channel] = ''
2573 channels[channel] = ''
2574 except IndexError:
2574 except IndexError:
2575 pass
2575 pass
2576
2576
2577 def stat():
2577 def stat():
2578 count = 0
2578 count = 0
2579 while channels:
2579 while channels:
2580 d = '\n%03s ' % count
2580 d = '\n%03s ' % count
2581 for n, v in enumerate(channels):
2581 for n, v in enumerate(channels):
2582 if v:
2582 if v:
2583 d += v[0]
2583 d += v[0]
2584 channels[n] = v[1:] or '.'
2584 channels[n] = v[1:] or '.'
2585 else:
2585 else:
2586 d += ' '
2586 d += ' '
2587 d += ' '
2587 d += ' '
2588 with iolock:
2588 with iolock:
2589 sys.stdout.write(d + ' ')
2589 sys.stdout.write(d + ' ')
2590 sys.stdout.flush()
2590 sys.stdout.flush()
2591 for x in xrange(10):
2591 for x in xrange(10):
2592 if channels:
2592 if channels:
2593 time.sleep(0.1)
2593 time.sleep(0.1)
2594 count += 1
2594 count += 1
2595
2595
2596 stoppedearly = False
2596 stoppedearly = False
2597
2597
2598 if self._showchannels:
2598 if self._showchannels:
2599 statthread = threading.Thread(target=stat, name="stat")
2599 statthread = threading.Thread(target=stat, name="stat")
2600 statthread.start()
2600 statthread.start()
2601
2601
2602 try:
2602 try:
2603 while tests or running:
2603 while tests or running:
2604 if not done.empty() or running == self._jobs or not tests:
2604 if not done.empty() or running == self._jobs or not tests:
2605 try:
2605 try:
2606 done.get(True, 1)
2606 done.get(True, 1)
2607 running -= 1
2607 running -= 1
2608 if result and result.shouldStop:
2608 if result and result.shouldStop:
2609 stoppedearly = True
2609 stoppedearly = True
2610 break
2610 break
2611 except queue.Empty:
2611 except queue.Empty:
2612 continue
2612 continue
2613 if tests and not running == self._jobs:
2613 if tests and not running == self._jobs:
2614 test = tests.pop(0)
2614 test = tests.pop(0)
2615 if self._loop:
2615 if self._loop:
2616 if getattr(test, 'should_reload', False):
2616 if getattr(test, 'should_reload', False):
2617 num_tests[0] += 1
2617 num_tests[0] += 1
2618 tests.append(self._loadtest(test, num_tests[0]))
2618 tests.append(self._loadtest(test, num_tests[0]))
2619 else:
2619 else:
2620 tests.append(test)
2620 tests.append(test)
2621 if self._jobs == 1:
2621 if self._jobs == 1:
2622 job(test, result)
2622 job(test, result)
2623 else:
2623 else:
2624 t = threading.Thread(
2624 t = threading.Thread(
2625 target=job, name=test.name, args=(test, result)
2625 target=job, name=test.name, args=(test, result)
2626 )
2626 )
2627 t.start()
2627 t.start()
2628 running += 1
2628 running += 1
2629
2629
2630 # If we stop early we still need to wait on started tests to
2630 # If we stop early we still need to wait on started tests to
2631 # finish. Otherwise, there is a race between the test completing
2631 # finish. Otherwise, there is a race between the test completing
2632 # and the test's cleanup code running. This could result in the
2632 # and the test's cleanup code running. This could result in the
2633 # test reporting incorrect results.
2633 # test reporting incorrect results.
2634 if stoppedearly:
2634 if stoppedearly:
2635 while running:
2635 while running:
2636 try:
2636 try:
2637 done.get(True, 1)
2637 done.get(True, 1)
2638 running -= 1
2638 running -= 1
2639 except queue.Empty:
2639 except queue.Empty:
2640 continue
2640 continue
2641 except KeyboardInterrupt:
2641 except KeyboardInterrupt:
2642 for test in runtests:
2642 for test in runtests:
2643 test.abort()
2643 test.abort()
2644
2644
2645 channels = []
2645 channels = []
2646
2646
2647 return result
2647 return result
2648
2648
2649
2649
2650 # Save the most recent 5 wall-clock runtimes of each test to a
2650 # Save the most recent 5 wall-clock runtimes of each test to a
2651 # human-readable text file named .testtimes. Tests are sorted
2651 # human-readable text file named .testtimes. Tests are sorted
2652 # alphabetically, while times for each test are listed from oldest to
2652 # alphabetically, while times for each test are listed from oldest to
2653 # newest.
2653 # newest.
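# A .testtimes file therefore looks roughly like:
#   test-commit.t 2.310 2.295 2.401
#   test-status.t 0.512 0.498
# one test per line, with up to the five most recent real-time runs;
# loadtimes() below parses it with the '(.*?) ([0-9. ]+)' pattern and
# savetimes() rewrites it via a temporary file.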
2654
2654
2655
2655
2656 def loadtimes(outputdir):
2656 def loadtimes(outputdir):
2657 times = []
2657 times = []
2658 try:
2658 try:
2659 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2659 with open(os.path.join(outputdir, b'.testtimes')) as fp:
2660 for line in fp:
2660 for line in fp:
2661 m = re.match('(.*?) ([0-9. ]+)', line)
2661 m = re.match('(.*?) ([0-9. ]+)', line)
2662 times.append(
2662 times.append(
2663 (m.group(1), [float(t) for t in m.group(2).split()])
2663 (m.group(1), [float(t) for t in m.group(2).split()])
2664 )
2664 )
2665 except IOError as err:
2665 except IOError as err:
2666 if err.errno != errno.ENOENT:
2666 if err.errno != errno.ENOENT:
2667 raise
2667 raise
2668 return times
2668 return times
2669
2669
2670
2670
2671 def savetimes(outputdir, result):
2671 def savetimes(outputdir, result):
2672 saved = dict(loadtimes(outputdir))
2672 saved = dict(loadtimes(outputdir))
2673 maxruns = 5
2673 maxruns = 5
2674 skipped = {str(t[0]) for t in result.skipped}
2674 skipped = {str(t[0]) for t in result.skipped}
2675 for tdata in result.times:
2675 for tdata in result.times:
2676 test, real = tdata[0], tdata[3]
2676 test, real = tdata[0], tdata[3]
2677 if test not in skipped:
2677 if test not in skipped:
2678 ts = saved.setdefault(test, [])
2678 ts = saved.setdefault(test, [])
2679 ts.append(real)
2679 ts.append(real)
2680 ts[:] = ts[-maxruns:]
2680 ts[:] = ts[-maxruns:]
2681
2681
2682 fd, tmpname = tempfile.mkstemp(
2682 fd, tmpname = tempfile.mkstemp(
2683 prefix=b'.testtimes', dir=outputdir, text=True
2683 prefix=b'.testtimes', dir=outputdir, text=True
2684 )
2684 )
2685 with os.fdopen(fd, 'w') as fp:
2685 with os.fdopen(fd, 'w') as fp:
2686 for name, ts in sorted(saved.items()):
2686 for name, ts in sorted(saved.items()):
2687 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2687 fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts])))
2688 timepath = os.path.join(outputdir, b'.testtimes')
2688 timepath = os.path.join(outputdir, b'.testtimes')
2689 try:
2689 try:
2690 os.unlink(timepath)
2690 os.unlink(timepath)
2691 except OSError:
2691 except OSError:
2692 pass
2692 pass
2693 try:
2693 try:
2694 os.rename(tmpname, timepath)
2694 os.rename(tmpname, timepath)
2695 except OSError:
2695 except OSError:
2696 pass
2696 pass
2697
2697
2698
2698
2699 class TextTestRunner(unittest.TextTestRunner):
2699 class TextTestRunner(unittest.TextTestRunner):
2700 """Custom unittest test runner that uses appropriate settings."""
2700 """Custom unittest test runner that uses appropriate settings."""
2701
2701
2702 def __init__(self, runner, *args, **kwargs):
2702 def __init__(self, runner, *args, **kwargs):
2703 super(TextTestRunner, self).__init__(*args, **kwargs)
2703 super(TextTestRunner, self).__init__(*args, **kwargs)
2704
2704
2705 self._runner = runner
2705 self._runner = runner
2706
2706
2707 self._result = getTestResult()(
2707 self._result = getTestResult()(
2708 self._runner.options, self.stream, self.descriptions, self.verbosity
2708 self._runner.options, self.stream, self.descriptions, self.verbosity
2709 )
2709 )
2710
2710
2711 def listtests(self, test):
2711 def listtests(self, test):
2712 test = sorted(test, key=lambda t: t.name)
2712 test = sorted(test, key=lambda t: t.name)
2713
2713
2714 self._result.onStart(test)
2714 self._result.onStart(test)
2715
2715
2716 for t in test:
2716 for t in test:
2717 print(t.name)
2717 print(t.name)
2718 self._result.addSuccess(t)
2718 self._result.addSuccess(t)
2719
2719
2720 if self._runner.options.xunit:
2720 if self._runner.options.xunit:
2721 with open(self._runner.options.xunit, "wb") as xuf:
2721 with open(self._runner.options.xunit, "wb") as xuf:
2722 self._writexunit(self._result, xuf)
2722 self._writexunit(self._result, xuf)
2723
2723
2724 if self._runner.options.json:
2724 if self._runner.options.json:
2725 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2725 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2726 with open(jsonpath, 'w') as fp:
2726 with open(jsonpath, 'w') as fp:
2727 self._writejson(self._result, fp)
2727 self._writejson(self._result, fp)
2728
2728
2729 return self._result
2729 return self._result
2730
2730
2731 def run(self, test):
2731 def run(self, test):
2732 self._result.onStart(test)
2732 self._result.onStart(test)
2733 test(self._result)
2733 test(self._result)
2734
2734
2735 failed = len(self._result.failures)
2735 failed = len(self._result.failures)
2736 skipped = len(self._result.skipped)
2736 skipped = len(self._result.skipped)
2737 ignored = len(self._result.ignored)
2737 ignored = len(self._result.ignored)
2738
2738
2739 with iolock:
2739 with iolock:
2740 self.stream.writeln('')
2740 self.stream.writeln('')
2741
2741
2742 if not self._runner.options.noskips:
2742 if not self._runner.options.noskips:
2743 for test, msg in sorted(
2743 for test, msg in sorted(
2744 self._result.skipped, key=lambda s: s[0].name
2744 self._result.skipped, key=lambda s: s[0].name
2745 ):
2745 ):
2746 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2746 formatted = 'Skipped %s: %s\n' % (test.name, msg)
2747 msg = highlightmsg(formatted, self._result.color)
2747 msg = highlightmsg(formatted, self._result.color)
2748 self.stream.write(msg)
2748 self.stream.write(msg)
2749 for test, msg in sorted(
2749 for test, msg in sorted(
2750 self._result.failures, key=lambda f: f[0].name
2750 self._result.failures, key=lambda f: f[0].name
2751 ):
2751 ):
2752 formatted = 'Failed %s: %s\n' % (test.name, msg)
2752 formatted = 'Failed %s: %s\n' % (test.name, msg)
2753 self.stream.write(highlightmsg(formatted, self._result.color))
2753 self.stream.write(highlightmsg(formatted, self._result.color))
2754 for test, msg in sorted(
2754 for test, msg in sorted(
2755 self._result.errors, key=lambda e: e[0].name
2755 self._result.errors, key=lambda e: e[0].name
2756 ):
2756 ):
2757 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2757 self.stream.writeln('Errored %s: %s' % (test.name, msg))
2758
2758
2759 if self._runner.options.xunit:
2759 if self._runner.options.xunit:
2760 with open(self._runner.options.xunit, "wb") as xuf:
2760 with open(self._runner.options.xunit, "wb") as xuf:
2761 self._writexunit(self._result, xuf)
2761 self._writexunit(self._result, xuf)
2762
2762
2763 if self._runner.options.json:
2763 if self._runner.options.json:
2764 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2764 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
2765 with open(jsonpath, 'w') as fp:
2765 with open(jsonpath, 'w') as fp:
2766 self._writejson(self._result, fp)
2766 self._writejson(self._result, fp)
2767
2767
2768 self._runner._checkhglib('Tested')
2768 self._runner._checkhglib('Tested')
2769
2769
2770 savetimes(self._runner._outputdir, self._result)
2770 savetimes(self._runner._outputdir, self._result)
2771
2771
2772 if failed and self._runner.options.known_good_rev:
2772 if failed and self._runner.options.known_good_rev:
2773 self._bisecttests(t for t, m in self._result.failures)
2773 self._bisecttests(t for t, m in self._result.failures)
2774 self.stream.writeln(
2774 self.stream.writeln(
2775 '# Ran %d tests, %d skipped, %d failed.'
2775 '# Ran %d tests, %d skipped, %d failed.'
2776 % (self._result.testsRun, skipped + ignored, failed)
2776 % (self._result.testsRun, skipped + ignored, failed)
2777 )
2777 )
2778 if failed:
2778 if failed:
2779 self.stream.writeln(
2779 self.stream.writeln(
2780 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2780 'python hash seed: %s' % os.environ['PYTHONHASHSEED']
2781 )
2781 )
2782 if self._runner.options.time:
2782 if self._runner.options.time:
2783 self.printtimes(self._result.times)
2783 self.printtimes(self._result.times)
2784
2784
2785 if self._runner.options.exceptions:
2785 if self._runner.options.exceptions:
2786 exceptions = aggregateexceptions(
2786 exceptions = aggregateexceptions(
2787 os.path.join(self._runner._outputdir, b'exceptions')
2787 os.path.join(self._runner._outputdir, b'exceptions')
2788 )
2788 )
2789
2789
2790 self.stream.writeln('Exceptions Report:')
2790 self.stream.writeln('Exceptions Report:')
2791 self.stream.writeln(
2791 self.stream.writeln(
2792 '%d total from %d frames'
2792 '%d total from %d frames'
2793 % (exceptions['total'], len(exceptions['exceptioncounts']))
2793 % (exceptions['total'], len(exceptions['exceptioncounts']))
2794 )
2794 )
2795 combined = exceptions['combined']
2795 combined = exceptions['combined']
2796 for key in sorted(combined, key=combined.get, reverse=True):
2796 for key in sorted(combined, key=combined.get, reverse=True):
2797 frame, line, exc = key
2797 frame, line, exc = key
2798 totalcount, testcount, leastcount, leasttest = combined[key]
2798 totalcount, testcount, leastcount, leasttest = combined[key]
2799
2799
2800 self.stream.writeln(
2800 self.stream.writeln(
2801 '%d (%d tests)\t%s: %s (%s - %d total)'
2801 '%d (%d tests)\t%s: %s (%s - %d total)'
2802 % (
2802 % (
2803 totalcount,
2803 totalcount,
2804 testcount,
2804 testcount,
2805 frame,
2805 frame,
2806 exc,
2806 exc,
2807 leasttest,
2807 leasttest,
2808 leastcount,
2808 leastcount,
2809 )
2809 )
2810 )
2810 )
2811
2811
2812 self.stream.flush()
2812 self.stream.flush()
2813
2813
2814 return self._result
2814 return self._result
2815
2815
2816 def _bisecttests(self, tests):
2816 def _bisecttests(self, tests):
2817 bisectcmd = ['hg', 'bisect']
2817 bisectcmd = ['hg', 'bisect']
2818 bisectrepo = self._runner.options.bisect_repo
2818 bisectrepo = self._runner.options.bisect_repo
2819 if bisectrepo:
2819 if bisectrepo:
2820 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2820 bisectcmd.extend(['-R', os.path.abspath(bisectrepo)])
2821
2821
2822 def pread(args):
2822 def pread(args):
2823 env = os.environ.copy()
2823 env = os.environ.copy()
2824 env['HGPLAIN'] = '1'
2824 env['HGPLAIN'] = '1'
2825 p = subprocess.Popen(
2825 p = subprocess.Popen(
2826 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2826 args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, env=env
2827 )
2827 )
2828 data = p.stdout.read()
2828 data = p.stdout.read()
2829 p.wait()
2829 p.wait()
2830 return data
2830 return data
2831
2831
2832 for test in tests:
2832 for test in tests:
2833 pread(bisectcmd + ['--reset']),
2833 pread(bisectcmd + ['--reset']),
2834 pread(bisectcmd + ['--bad', '.'])
2834 pread(bisectcmd + ['--bad', '.'])
2835 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2835 pread(bisectcmd + ['--good', self._runner.options.known_good_rev])
2836 # TODO: we probably need to forward more options
2836 # TODO: we probably need to forward more options
2837 # that alter hg's behavior inside the tests.
2837 # that alter hg's behavior inside the tests.
2838 opts = ''
2838 opts = ''
2839 withhg = self._runner.options.with_hg
2839 withhg = self._runner.options.with_hg
2840 if withhg:
2840 if withhg:
2841 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2841 opts += ' --with-hg=%s ' % shellquote(_bytes2sys(withhg))
2842 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2842 rtc = '%s %s %s %s' % (sysexecutable, sys.argv[0], opts, test)
2843 data = pread(bisectcmd + ['--command', rtc])
2843 data = pread(bisectcmd + ['--command', rtc])
2844 m = re.search(
2844 m = re.search(
2845 (
2845 (
2846 br'\nThe first (?P<goodbad>bad|good) revision '
2846 br'\nThe first (?P<goodbad>bad|good) revision '
2847 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2847 br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
2848 br'summary: +(?P<summary>[^\n]+)\n'
2848 br'summary: +(?P<summary>[^\n]+)\n'
2849 ),
2849 ),
2850 data,
2850 data,
2851 (re.MULTILINE | re.DOTALL),
2851 (re.MULTILINE | re.DOTALL),
2852 )
2852 )
2853 if m is None:
2853 if m is None:
2854 self.stream.writeln(
2854 self.stream.writeln(
2855 'Failed to identify failure point for %s' % test
2855 'Failed to identify failure point for %s' % test
2856 )
2856 )
2857 continue
2857 continue
2858 dat = m.groupdict()
2858 dat = m.groupdict()
2859 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2859 verb = 'broken' if dat['goodbad'] == b'bad' else 'fixed'
2860 self.stream.writeln(
2860 self.stream.writeln(
2861 '%s %s by %s (%s)'
2861 '%s %s by %s (%s)'
2862 % (
2862 % (
2863 test,
2863 test,
2864 verb,
2864 verb,
2865 dat['node'].decode('ascii'),
2865 dat['node'].decode('ascii'),
2866 dat['summary'].decode('utf8', 'ignore'),
2866 dat['summary'].decode('utf8', 'ignore'),
2867 )
2867 )
2868 )
2868 )
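For reference, the bisect-output pattern used above can be exercised on a fabricated `hg bisect` transcript; this is a standalone sketch (the changeset number, node and summary below are invented), not part of the harness:

import re

sample = (
    b'...\nThe first bad revision is:\n'
    b'changeset:   12345:abcdef0123456789\n'
    b'user:        someone\n'
    b'summary:     fix the widget\n'
)
m = re.search(
    br'\nThe first (?P<goodbad>bad|good) revision '
    br'is:\nchangeset: +\d+:(?P<node>[a-f0-9]+)\n.*\n'
    br'summary: +(?P<summary>[^\n]+)\n',
    sample,
    re.MULTILINE | re.DOTALL,
)
assert m is not None and m.group('goodbad') == b'bad'
assert m.group('node') == b'abcdef0123456789'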
2869
2869
2870 def printtimes(self, times):
2870 def printtimes(self, times):
2871 # iolock held by run
2871 # iolock held by run
2872 self.stream.writeln('# Producing time report')
2872 self.stream.writeln('# Producing time report')
2873 times.sort(key=lambda t: (t[3]))
2873 times.sort(key=lambda t: (t[3]))
2874 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2874 cols = '%7.3f %7.3f %7.3f %7.3f %7.3f %s'
2875 self.stream.writeln(
2875 self.stream.writeln(
2876 '%-7s %-7s %-7s %-7s %-7s %s'
2876 '%-7s %-7s %-7s %-7s %-7s %s'
2877 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2877 % ('start', 'end', 'cuser', 'csys', 'real', 'Test')
2878 )
2878 )
2879 for tdata in times:
2879 for tdata in times:
2880 test = tdata[0]
2880 test = tdata[0]
2881 cuser, csys, real, start, end = tdata[1:6]
2881 cuser, csys, real, start, end = tdata[1:6]
2882 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2882 self.stream.writeln(cols % (start, end, cuser, csys, real, test))
2883
2883
2884 @staticmethod
2884 @staticmethod
2885 def _writexunit(result, outf):
2885 def _writexunit(result, outf):
2886 # See http://llg.cubic.org/docs/junit/ for a reference.
2886 # See http://llg.cubic.org/docs/junit/ for a reference.
2887 timesd = {t[0]: t[3] for t in result.times}
2887 timesd = {t[0]: t[3] for t in result.times}
2888 doc = minidom.Document()
2888 doc = minidom.Document()
2889 s = doc.createElement('testsuite')
2889 s = doc.createElement('testsuite')
2890 s.setAttribute('errors', "0") # TODO
2890 s.setAttribute('errors', "0") # TODO
2891 s.setAttribute('failures', str(len(result.failures)))
2891 s.setAttribute('failures', str(len(result.failures)))
2892 s.setAttribute('name', 'run-tests')
2892 s.setAttribute('name', 'run-tests')
2893 s.setAttribute(
2893 s.setAttribute(
2894 'skipped', str(len(result.skipped) + len(result.ignored))
2894 'skipped', str(len(result.skipped) + len(result.ignored))
2895 )
2895 )
2896 s.setAttribute('tests', str(result.testsRun))
2896 s.setAttribute('tests', str(result.testsRun))
2897 doc.appendChild(s)
2897 doc.appendChild(s)
2898 for tc in result.successes:
2898 for tc in result.successes:
2899 t = doc.createElement('testcase')
2899 t = doc.createElement('testcase')
2900 t.setAttribute('name', tc.name)
2900 t.setAttribute('name', tc.name)
2901 tctime = timesd.get(tc.name)
2901 tctime = timesd.get(tc.name)
2902 if tctime is not None:
2902 if tctime is not None:
2903 t.setAttribute('time', '%.3f' % tctime)
2903 t.setAttribute('time', '%.3f' % tctime)
2904 s.appendChild(t)
2904 s.appendChild(t)
2905 for tc, err in sorted(result.faildata.items()):
2905 for tc, err in sorted(result.faildata.items()):
2906 t = doc.createElement('testcase')
2906 t = doc.createElement('testcase')
2907 t.setAttribute('name', tc)
2907 t.setAttribute('name', tc)
2908 tctime = timesd.get(tc)
2908 tctime = timesd.get(tc)
2909 if tctime is not None:
2909 if tctime is not None:
2910 t.setAttribute('time', '%.3f' % tctime)
2910 t.setAttribute('time', '%.3f' % tctime)
2911 # createCDATASection expects a unicode or it will
2911 # createCDATASection expects a unicode or it will
2912 # convert using default conversion rules, which will
2912 # convert using default conversion rules, which will
2913 # fail if string isn't ASCII.
2913 # fail if string isn't ASCII.
2914 err = cdatasafe(err).decode('utf-8', 'replace')
2914 err = cdatasafe(err).decode('utf-8', 'replace')
2915 cd = doc.createCDATASection(err)
2915 cd = doc.createCDATASection(err)
2916 # Use 'failure' here instead of 'error' to match errors = 0,
2916 # Use 'failure' here instead of 'error' to match errors = 0,
2917 # failures = len(result.failures) in the testsuite element.
2917 # failures = len(result.failures) in the testsuite element.
2918 failelem = doc.createElement('failure')
2918 failelem = doc.createElement('failure')
2919 failelem.setAttribute('message', 'output changed')
2919 failelem.setAttribute('message', 'output changed')
2920 failelem.setAttribute('type', 'output-mismatch')
2920 failelem.setAttribute('type', 'output-mismatch')
2921 failelem.appendChild(cd)
2921 failelem.appendChild(cd)
2922 t.appendChild(failelem)
2922 t.appendChild(failelem)
2923 s.appendChild(t)
2923 s.appendChild(t)
2924 for tc, message in result.skipped:
2924 for tc, message in result.skipped:
2925 # According to the schema, 'skipped' has no attributes. So store
2925 # According to the schema, 'skipped' has no attributes. So store
2926 # the skip message as a text node instead.
2926 # the skip message as a text node instead.
2927 t = doc.createElement('testcase')
2927 t = doc.createElement('testcase')
2928 t.setAttribute('name', tc.name)
2928 t.setAttribute('name', tc.name)
2929 binmessage = message.encode('utf-8')
2929 binmessage = message.encode('utf-8')
2930 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2930 message = cdatasafe(binmessage).decode('utf-8', 'replace')
2931 cd = doc.createCDATASection(message)
2931 cd = doc.createCDATASection(message)
2932 skipelem = doc.createElement('skipped')
2932 skipelem = doc.createElement('skipped')
2933 skipelem.appendChild(cd)
2933 skipelem.appendChild(cd)
2934 t.appendChild(skipelem)
2934 t.appendChild(skipelem)
2935 s.appendChild(t)
2935 s.appendChild(t)
2936 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2936 outf.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
2937
2937
2938 @staticmethod
2938 @staticmethod
2939 def _writejson(result, outf):
2939 def _writejson(result, outf):
2940 timesd = {}
2940 timesd = {}
2941 for tdata in result.times:
2941 for tdata in result.times:
2942 test = tdata[0]
2942 test = tdata[0]
2943 timesd[test] = tdata[1:]
2943 timesd[test] = tdata[1:]
2944
2944
2945 outcome = {}
2945 outcome = {}
2946 groups = [
2946 groups = [
2947 ('success', ((tc, None) for tc in result.successes)),
2947 ('success', ((tc, None) for tc in result.successes)),
2948 ('failure', result.failures),
2948 ('failure', result.failures),
2949 ('skip', result.skipped),
2949 ('skip', result.skipped),
2950 ]
2950 ]
2951 for res, testcases in groups:
2951 for res, testcases in groups:
2952 for tc, __ in testcases:
2952 for tc, __ in testcases:
2953 if tc.name in timesd:
2953 if tc.name in timesd:
2954 diff = result.faildata.get(tc.name, b'')
2954 diff = result.faildata.get(tc.name, b'')
2955 try:
2955 try:
2956 diff = diff.decode('unicode_escape')
2956 diff = diff.decode('unicode_escape')
2957 except UnicodeDecodeError as e:
2957 except UnicodeDecodeError as e:
2958 diff = '%r decoding diff, sorry' % e
2958 diff = '%r decoding diff, sorry' % e
2959 tres = {
2959 tres = {
2960 'result': res,
2960 'result': res,
2961 'time': ('%0.3f' % timesd[tc.name][2]),
2961 'time': ('%0.3f' % timesd[tc.name][2]),
2962 'cuser': ('%0.3f' % timesd[tc.name][0]),
2962 'cuser': ('%0.3f' % timesd[tc.name][0]),
2963 'csys': ('%0.3f' % timesd[tc.name][1]),
2963 'csys': ('%0.3f' % timesd[tc.name][1]),
2964 'start': ('%0.3f' % timesd[tc.name][3]),
2964 'start': ('%0.3f' % timesd[tc.name][3]),
2965 'end': ('%0.3f' % timesd[tc.name][4]),
2965 'end': ('%0.3f' % timesd[tc.name][4]),
2966 'diff': diff,
2966 'diff': diff,
2967 }
2967 }
2968 else:
2968 else:
2969 # blacklisted test
2969 # blacklisted test
2970 tres = {'result': res}
2970 tres = {'result': res}
2971
2971
2972 outcome[tc.name] = tres
2972 outcome[tc.name] = tres
2973 jsonout = json.dumps(
2973 jsonout = json.dumps(
2974 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2974 outcome, sort_keys=True, indent=4, separators=(',', ': ')
2975 )
2975 )
2976 outf.writelines(("testreport =", jsonout))
2976 outf.writelines(("testreport =", jsonout))
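Because the file written above is a `testreport = {...}` assignment rather than bare JSON, a consumer has to strip the prefix before parsing. A minimal sketch, with a hypothetical path:

import json

def load_report(path='output/report.json'):
    with open(path) as fp:
        text = fp.read()
    # drop the leading "testreport =" before handing the rest to json
    return json.loads(text.split('=', 1)[1])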
2977
2977
2978
2978
2979 def sorttests(testdescs, previoustimes, shuffle=False):
2979 def sorttests(testdescs, previoustimes, shuffle=False):
2980 """Do an in-place sort of tests."""
2980 """Do an in-place sort of tests."""
2981 if shuffle:
2981 if shuffle:
2982 random.shuffle(testdescs)
2982 random.shuffle(testdescs)
2983 return
2983 return
2984
2984
2985 if previoustimes:
2985 if previoustimes:
2986
2986
2987 def sortkey(f):
2987 def sortkey(f):
2988 f = f['path']
2988 f = f['path']
2989 if f in previoustimes:
2989 if f in previoustimes:
2990 # Use most recent time as estimate
2990 # Use most recent time as estimate
2991 return -(previoustimes[f][-1])
2991 return -(previoustimes[f][-1])
2992 else:
2992 else:
2993 # Default to a rather arbitrary value of 1 second for new tests
2993 # Default to a rather arbitrary value of 1 second for new tests
2994 return -1.0
2994 return -1.0
2995
2995
2996 else:
2996 else:
2997 # keywords for slow tests
2997 # keywords for slow tests
2998 slow = {
2998 slow = {
2999 b'svn': 10,
2999 b'svn': 10,
3000 b'cvs': 10,
3000 b'cvs': 10,
3001 b'hghave': 10,
3001 b'hghave': 10,
3002 b'largefiles-update': 10,
3002 b'largefiles-update': 10,
3003 b'run-tests': 10,
3003 b'run-tests': 10,
3004 b'corruption': 10,
3004 b'corruption': 10,
3005 b'race': 10,
3005 b'race': 10,
3006 b'i18n': 10,
3006 b'i18n': 10,
3007 b'check': 100,
3007 b'check': 100,
3008 b'gendoc': 100,
3008 b'gendoc': 100,
3009 b'contrib-perf': 200,
3009 b'contrib-perf': 200,
3010 b'merge-combination': 100,
3010 b'merge-combination': 100,
3011 }
3011 }
3012 perf = {}
3012 perf = {}
3013
3013
3014 def sortkey(f):
3014 def sortkey(f):
3015 # run largest tests first, as they tend to take the longest
3015 # run largest tests first, as they tend to take the longest
3016 f = f['path']
3016 f = f['path']
3017 try:
3017 try:
3018 return perf[f]
3018 return perf[f]
3019 except KeyError:
3019 except KeyError:
3020 try:
3020 try:
3021 val = -os.stat(f).st_size
3021 val = -os.stat(f).st_size
3022 except OSError as e:
3022 except OSError as e:
3023 if e.errno != errno.ENOENT:
3023 if e.errno != errno.ENOENT:
3024 raise
3024 raise
3025 perf[f] = -1e9 # file does not exist, tell early
3025 perf[f] = -1e9 # file does not exist, tell early
3026 return -1e9
3026 return -1e9
3027 for kw, mul in slow.items():
3027 for kw, mul in slow.items():
3028 if kw in f:
3028 if kw in f:
3029 val *= mul
3029 val *= mul
3030 if f.endswith(b'.py'):
3030 if f.endswith(b'.py'):
3031 val /= 10.0
3031 val /= 10.0
3032 perf[f] = val / 1000.0
3032 perf[f] = val / 1000.0
3033 return perf[f]
3033 return perf[f]
3034
3034
3035 testdescs.sort(key=sortkey)
3035 testdescs.sort(key=sortkey)
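A quick standalone check of the ordering policy above, with made-up paths and timings (the helper below mirrors the previoustimes branch of sortkey, including the one-second default for unknown tests): when previous timings exist, the slowest recent test is scheduled first.

def order_by_previous_runtime(testdescs, previoustimes):
    # most recent recorded time, negated, so bigger times sort first
    return sorted(
        testdescs,
        key=lambda d: -previoustimes.get(d['path'], (1.0,))[-1],
    )

descs = [{'path': b'test-fast.t'}, {'path': b'test-slow.t'}]
times = {b'test-fast.t': (0.4, 0.5), b'test-slow.t': (8.0, 9.0)}
assert [d['path'] for d in order_by_previous_runtime(descs, times)] == [
    b'test-slow.t',
    b'test-fast.t',
]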
3036
3036
3037
3037
3038 class TestRunner(object):
3038 class TestRunner(object):
3039 """Holds context for executing tests.
3039 """Holds context for executing tests.
3040
3040
3041 Tests rely on a lot of state. This object holds it for them.
3041 Tests rely on a lot of state. This object holds it for them.
3042 """
3042 """
3043
3043
3044 # Programs required to run tests.
3044 # Programs required to run tests.
3045 REQUIREDTOOLS = [
3045 REQUIREDTOOLS = [
3046 b'diff',
3046 b'diff',
3047 b'grep',
3047 b'grep',
3048 b'unzip',
3048 b'unzip',
3049 b'gunzip',
3049 b'gunzip',
3050 b'bunzip2',
3050 b'bunzip2',
3051 b'sed',
3051 b'sed',
3052 ]
3052 ]
3053
3053
3054 # Maps file extensions to test class.
3054 # Maps file extensions to test class.
3055 TESTTYPES = [
3055 TESTTYPES = [
3056 (b'.py', PythonTest),
3056 (b'.py', PythonTest),
3057 (b'.t', TTest),
3057 (b'.t', TTest),
3058 ]
3058 ]
3059
3059
3060 def __init__(self):
3060 def __init__(self):
3061 self.options = None
3061 self.options = None
3062 self._hgroot = None
3062 self._hgroot = None
3063 self._testdir = None
3063 self._testdir = None
3064 self._outputdir = None
3064 self._outputdir = None
3065 self._hgtmp = None
3065 self._hgtmp = None
3066 self._installdir = None
3066 self._installdir = None
3067 self._bindir = None
3067 self._bindir = None
3068 # a place for run-tests.py to generate the executables it needs
3068 # a place for run-tests.py to generate the executables it needs
3069 self._custom_bin_dir = None
3069 self._custom_bin_dir = None
3070 self._pythondir = None
3070 self._pythondir = None
3071 # True if we had to infer the pythondir from --with-hg
3071 # True if we had to infer the pythondir from --with-hg
3072 self._pythondir_inferred = False
3072 self._pythondir_inferred = False
3073 self._coveragefile = None
3073 self._coveragefile = None
3074 self._createdfiles = []
3074 self._createdfiles = []
3075 self._hgcommand = None
3075 self._hgcommand = None
3076 self._hgpath = None
3076 self._hgpath = None
3077 self._portoffset = 0
3077 self._portoffset = 0
3078 self._ports = {}
3078 self._ports = {}
3079
3079
3080 def run(self, args, parser=None):
3080 def run(self, args, parser=None):
3081 """Run the test suite."""
3081 """Run the test suite."""
3082 oldmask = os.umask(0o22)
3082 oldmask = os.umask(0o22)
3083 try:
3083 try:
3084 parser = parser or getparser()
3084 parser = parser or getparser()
3085 options = parseargs(args, parser)
3085 options = parseargs(args, parser)
3086 tests = [_sys2bytes(a) for a in options.tests]
3086 tests = [_sys2bytes(a) for a in options.tests]
3087 if options.test_list is not None:
3087 if options.test_list is not None:
3088 for listfile in options.test_list:
3088 for listfile in options.test_list:
3089 with open(listfile, 'rb') as f:
3089 with open(listfile, 'rb') as f:
3090 tests.extend(t for t in f.read().splitlines() if t)
3090 tests.extend(t for t in f.read().splitlines() if t)
3091 self.options = options
3091 self.options = options
3092
3092
3093 self._checktools()
3093 self._checktools()
3094 testdescs = self.findtests(tests)
3094 testdescs = self.findtests(tests)
3095 if options.profile_runner:
3095 if options.profile_runner:
3096 import statprof
3096 import statprof
3097
3097
3098 statprof.start()
3098 statprof.start()
3099 result = self._run(testdescs)
3099 result = self._run(testdescs)
3100 if options.profile_runner:
3100 if options.profile_runner:
3101 statprof.stop()
3101 statprof.stop()
3102 statprof.display()
3102 statprof.display()
3103 return result
3103 return result
3104
3104
3105 finally:
3105 finally:
3106 os.umask(oldmask)
3106 os.umask(oldmask)
3107
3107
3108 def _run(self, testdescs):
3108 def _run(self, testdescs):
3109 testdir = getcwdb()
3109 testdir = getcwdb()
3110 # assume all tests in same folder for now
3110 # assume all tests in same folder for now
3111 if testdescs:
3111 if testdescs:
3112 pathname = os.path.dirname(testdescs[0]['path'])
3112 pathname = os.path.dirname(testdescs[0]['path'])
3113 if pathname:
3113 if pathname:
3114 testdir = os.path.join(testdir, pathname)
3114 testdir = os.path.join(testdir, pathname)
3115 self._testdir = osenvironb[b'TESTDIR'] = testdir
3115 self._testdir = osenvironb[b'TESTDIR'] = testdir
3116 if self.options.outputdir:
3116 if self.options.outputdir:
3117 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3117 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3118 else:
3118 else:
3119 self._outputdir = getcwdb()
3119 self._outputdir = getcwdb()
3120 if testdescs and pathname:
3120 if testdescs and pathname:
3121 self._outputdir = os.path.join(self._outputdir, pathname)
3121 self._outputdir = os.path.join(self._outputdir, pathname)
3122 previoustimes = {}
3122 previoustimes = {}
3123 if self.options.order_by_runtime:
3123 if self.options.order_by_runtime:
3124 previoustimes = dict(loadtimes(self._outputdir))
3124 previoustimes = dict(loadtimes(self._outputdir))
3125 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3125 sorttests(testdescs, previoustimes, shuffle=self.options.random)
3126
3126
3127 if 'PYTHONHASHSEED' not in os.environ:
3127 if 'PYTHONHASHSEED' not in os.environ:
3128 # use a random python hash seed all the time
3128 # use a random python hash seed all the time
3129 # we do the randomness ourselves to know what seed is used
3129 # we do the randomness ourselves to know what seed is used
3130 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
3130 os.environ['PYTHONHASHSEED'] = str(random.getrandbits(32))
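The point of pinning the seed here is reproducibility: when tests fail, the run summary prints the seed so a child interpreter can be re-run with the same hash ordering. A minimal standalone sketch of that idea (the helper name is invented, not part of run-tests.py):

import os
import random
import subprocess
import sys

def spawn_with_known_hash_seed(argv):
    env = dict(os.environ)
    env.setdefault('PYTHONHASHSEED', str(random.getrandbits(32)))
    # re-running later with the same PYTHONHASHSEED reproduces dict/set order
    return subprocess.call([sys.executable] + argv, env=env), env['PYTHONHASHSEED']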
3131
3131
3132 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3132 # Rayon (Rust crate for multi-threading) will use all logical CPU cores
3133 # by default, causing thrashing on high-cpu-count systems.
3133 # by default, causing thrashing on high-cpu-count systems.
3134 # Setting its limit to 3 during tests should still let us uncover
3134 # Setting its limit to 3 during tests should still let us uncover
3135 # multi-threading bugs while keeping the thrashing reasonable.
3135 # multi-threading bugs while keeping the thrashing reasonable.
3136 os.environ.setdefault("RAYON_NUM_THREADS", "3")
3136 os.environ.setdefault("RAYON_NUM_THREADS", "3")
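Because setdefault is used, an explicit override from the caller's environment still wins, for example (the value 16 is hypothetical):

import os

os.environ['RAYON_NUM_THREADS'] = '16'           # user override
os.environ.setdefault('RAYON_NUM_THREADS', '3')  # harness default is a no-op here
assert os.environ['RAYON_NUM_THREADS'] == '16'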
3137
3137
3138 if self.options.tmpdir:
3138 if self.options.tmpdir:
3139 self.options.keep_tmpdir = True
3139 self.options.keep_tmpdir = True
3140 tmpdir = _sys2bytes(self.options.tmpdir)
3140 tmpdir = _sys2bytes(self.options.tmpdir)
3141 if os.path.exists(tmpdir):
3141 if os.path.exists(tmpdir):
3142 # Meaning of tmpdir has changed since 1.3: we used to create
3142 # Meaning of tmpdir has changed since 1.3: we used to create
3143 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3143 # HGTMP inside tmpdir; now HGTMP is tmpdir. So fail if
3144 # tmpdir already exists.
3144 # tmpdir already exists.
3145 print("error: temp dir %r already exists" % tmpdir)
3145 print("error: temp dir %r already exists" % tmpdir)
3146 return 1
3146 return 1
3147
3147
3148 os.makedirs(tmpdir)
3148 os.makedirs(tmpdir)
3149 else:
3149 else:
3150 d = None
3150 d = None
3151 if WINDOWS:
3151 if WINDOWS:
3152 # without this, we get the default temp dir location, but
3152 # without this, we get the default temp dir location, but
3153 # in all lowercase, which causes trouble with paths (issue3490)
3153 # in all lowercase, which causes trouble with paths (issue3490)
3154 d = osenvironb.get(b'TMP', None)
3154 d = osenvironb.get(b'TMP', None)
3155 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3155 tmpdir = tempfile.mkdtemp(b'', b'hgtests.', d)
3156
3156
3157 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3157 self._hgtmp = osenvironb[b'HGTMP'] = os.path.realpath(tmpdir)
3158
3158
3159 self._custom_bin_dir = os.path.join(self._hgtmp, b'custom-bin')
3159 self._custom_bin_dir = os.path.join(self._hgtmp, b'custom-bin')
3160 os.makedirs(self._custom_bin_dir)
3160 os.makedirs(self._custom_bin_dir)
3161
3161
3162 if self.options.with_hg:
3162 if self.options.with_hg:
3163 self._installdir = None
3163 self._installdir = None
3164 whg = self.options.with_hg
3164 whg = self.options.with_hg
3165 self._bindir = os.path.dirname(os.path.realpath(whg))
3165 self._bindir = os.path.dirname(os.path.realpath(whg))
3166 assert isinstance(self._bindir, bytes)
3166 assert isinstance(self._bindir, bytes)
3167 self._hgcommand = os.path.basename(whg)
3167 self._hgcommand = os.path.basename(whg)
3168
3168
3169 normbin = os.path.normpath(os.path.abspath(whg))
3169 normbin = os.path.normpath(os.path.abspath(whg))
3170 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3170 normbin = normbin.replace(_sys2bytes(os.sep), b'/')
3171
3171
3172 # Other Python scripts in the test harness need to
3172 # Other Python scripts in the test harness need to
3173 # `import mercurial`. If `hg` is a Python script, we assume
3173 # `import mercurial`. If `hg` is a Python script, we assume
3174 # the Mercurial modules are relative to its path and tell the tests
3174 # the Mercurial modules are relative to its path and tell the tests
3175 # to load Python modules from its directory.
3175 # to load Python modules from its directory.
3176 with open(whg, 'rb') as fh:
3176 with open(whg, 'rb') as fh:
3177 initial = fh.read(1024)
3177 initial = fh.read(1024)
3178
3178
3179 if re.match(b'#!.*python', initial):
3179 if re.match(b'#!.*python', initial):
3180 self._pythondir = self._bindir
3180 self._pythondir = self._bindir
3181 # If it looks like our in-repo Rust binary, use the source root.
3181 # If it looks like our in-repo Rust binary, use the source root.
3182 # This is a bit hacky. But rhg is still not supported outside the
3182 # This is a bit hacky. But rhg is still not supported outside the
3183 # source directory. So until it is, do the simple thing.
3183 # source directory. So until it is, do the simple thing.
3184 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3184 elif re.search(b'/rust/target/[^/]+/hg', normbin):
3185 self._pythondir = os.path.dirname(self._testdir)
3185 self._pythondir = os.path.dirname(self._testdir)
3186 # Fall back to the legacy behavior.
3186 # Fall back to the legacy behavior.
3187 else:
3187 else:
3188 self._pythondir = self._bindir
3188 self._pythondir = self._bindir
3189 self._pythondir_inferred = True
3189 self._pythondir_inferred = True
3190
3190
3191 else:
3191 else:
3192 self._installdir = os.path.join(self._hgtmp, b"install")
3192 self._installdir = os.path.join(self._hgtmp, b"install")
3193 self._bindir = os.path.join(self._installdir, b"bin")
3193 self._bindir = os.path.join(self._installdir, b"bin")
3194 self._hgcommand = b'hg'
3194 self._hgcommand = b'hg'
3195 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3195 self._pythondir = os.path.join(self._installdir, b"lib", b"python")
3196
3196
3197 # Force the use of hg.exe instead of relying on MSYS to recognize that hg
3197 # Force the use of hg.exe instead of relying on MSYS to recognize that hg
3198 # is a python script and feed it to python.exe. Legacy stdio is
3198 # is a python script and feed it to python.exe. Legacy stdio is
3199 # force-enabled by hg.exe, and this is a more realistic way to launch hg
3199 # force-enabled by hg.exe, and this is a more realistic way to launch hg
3200 # anyway.
3200 # anyway.
3201 if WINDOWS and not self._hgcommand.endswith(b'.exe'):
3201 if WINDOWS and not self._hgcommand.endswith(b'.exe'):
3202 self._hgcommand += b'.exe'
3202 self._hgcommand += b'.exe'
3203
3203
3204 real_hg = os.path.join(self._bindir, self._hgcommand)
3204 real_hg = os.path.join(self._bindir, self._hgcommand)
3205 osenvironb[b'HGTEST_REAL_HG'] = real_hg
3205 osenvironb[b'HGTEST_REAL_HG'] = real_hg
3206 # set CHGHG, then replace "hg" command by "chg"
3206 # set CHGHG, then replace "hg" command by "chg"
3207 chgbindir = self._bindir
3207 chgbindir = self._bindir
3208 if self.options.chg or self.options.with_chg:
3208 if self.options.chg or self.options.with_chg:
3209 osenvironb[b'CHG_INSTALLED_AS_HG'] = b'1'
3209 osenvironb[b'CHG_INSTALLED_AS_HG'] = b'1'
3210 osenvironb[b'CHGHG'] = real_hg
3210 osenvironb[b'CHGHG'] = real_hg
3211 else:
3211 else:
3212 # drop flag for hghave
3212 # drop flag for hghave
3213 osenvironb.pop(b'CHG_INSTALLED_AS_HG', None)
3213 osenvironb.pop(b'CHG_INSTALLED_AS_HG', None)
3214 if self.options.chg:
3214 if self.options.chg:
3215 self._hgcommand = b'chg'
3215 self._hgcommand = b'chg'
3216 elif self.options.with_chg:
3216 elif self.options.with_chg:
3217 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3217 chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg))
3218 self._hgcommand = os.path.basename(self.options.with_chg)
3218 self._hgcommand = os.path.basename(self.options.with_chg)
3219
3219
3220 # configure fallback and replace "hg" command by "rhg"
3220 # configure fallback and replace "hg" command by "rhg"
3221 rhgbindir = self._bindir
3221 rhgbindir = self._bindir
3222 if self.options.rhg or self.options.with_rhg:
3222 if self.options.rhg or self.options.with_rhg:
3223 # Affects hghave.py
3223 # Affects hghave.py
3224 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3224 osenvironb[b'RHG_INSTALLED_AS_HG'] = b'1'
3225 # Affects configuration. Alternatives would be setting configuration through
3225 # Affects configuration. Alternatives would be setting configuration through
3226 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3226 # `$HGRCPATH` but some tests override that, or changing `_hgcommand` to include
3227 # `--config` but that disrupts tests that print command lines and check expected
3227 # `--config` but that disrupts tests that print command lines and check expected
3228 # output.
3228 # output.
3229 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3229 osenvironb[b'RHG_ON_UNSUPPORTED'] = b'fallback'
3230 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = real_hg
3230 osenvironb[b'RHG_FALLBACK_EXECUTABLE'] = real_hg
3231 osenvironb[b'RHG_STATUS'] = b'1'
3232 else:
3231 else:
3233 # drop flag for hghave
3232 # drop flag for hghave
3234 osenvironb.pop(b'RHG_INSTALLED_AS_HG', None)
3233 osenvironb.pop(b'RHG_INSTALLED_AS_HG', None)
3235 if self.options.rhg:
3234 if self.options.rhg:
3236 self._hgcommand = b'rhg'
3235 self._hgcommand = b'rhg'
3237 elif self.options.with_rhg:
3236 elif self.options.with_rhg:
3238 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3237 rhgbindir = os.path.dirname(os.path.realpath(self.options.with_rhg))
3239 self._hgcommand = os.path.basename(self.options.with_rhg)
3238 self._hgcommand = os.path.basename(self.options.with_rhg)
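In effect the block above hands every test an environment in which rhg delegates commands it does not implement to the full Python hg. A minimal sketch of that contract (the install path is hypothetical; running rhg itself would additionally assume an `rhg` binary on PATH):

import os

rhg_env = dict(os.environ)
rhg_env['RHG_ON_UNSUPPORTED'] = 'fallback'
rhg_env['RHG_FALLBACK_EXECUTABLE'] = '/tmp/hgtests/install/bin/hg'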
3240
3239
3241 if self.options.pyoxidized:
3240 if self.options.pyoxidized:
3242 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
3241 testdir = os.path.dirname(_sys2bytes(canonpath(sys.argv[0])))
3243 reporootdir = os.path.dirname(testdir)
3242 reporootdir = os.path.dirname(testdir)
3244 # XXX we should ideally install stuff instead of using the local build
3243 # XXX we should ideally install stuff instead of using the local build
3245 bin_path = (
3244 bin_path = (
3246 b'build/pyoxidizer/x86_64-pc-windows-msvc/release/app/hg.exe'
3245 b'build/pyoxidizer/x86_64-pc-windows-msvc/release/app/hg.exe'
3247 )
3246 )
3248 full_path = os.path.join(reporootdir, bin_path)
3247 full_path = os.path.join(reporootdir, bin_path)
3249 self._hgcommand = full_path
3248 self._hgcommand = full_path
3250 # Affects hghave.py
3249 # Affects hghave.py
3251 osenvironb[b'PYOXIDIZED_INSTALLED_AS_HG'] = b'1'
3250 osenvironb[b'PYOXIDIZED_INSTALLED_AS_HG'] = b'1'
3252 else:
3251 else:
3253 osenvironb.pop(b'PYOXIDIZED_INSTALLED_AS_HG', None)
3252 osenvironb.pop(b'PYOXIDIZED_INSTALLED_AS_HG', None)
3254
3253
3255 osenvironb[b"BINDIR"] = self._bindir
3254 osenvironb[b"BINDIR"] = self._bindir
3256 osenvironb[b"PYTHON"] = PYTHON
3255 osenvironb[b"PYTHON"] = PYTHON
3257
3256
3258 fileb = _sys2bytes(__file__)
3257 fileb = _sys2bytes(__file__)
3259 runtestdir = os.path.abspath(os.path.dirname(fileb))
3258 runtestdir = os.path.abspath(os.path.dirname(fileb))
3260 osenvironb[b'RUNTESTDIR'] = runtestdir
3259 osenvironb[b'RUNTESTDIR'] = runtestdir
3261 if PYTHON3:
3260 if PYTHON3:
3262 sepb = _sys2bytes(os.pathsep)
3261 sepb = _sys2bytes(os.pathsep)
3263 else:
3262 else:
3264 sepb = os.pathsep
3263 sepb = os.pathsep
3265 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3264 path = [self._bindir, runtestdir] + osenvironb[b"PATH"].split(sepb)
3266 if os.path.islink(__file__):
3265 if os.path.islink(__file__):
3267 # test helper will likely be at the end of the symlink
3266 # test helper will likely be at the end of the symlink
3268 realfile = os.path.realpath(fileb)
3267 realfile = os.path.realpath(fileb)
3269 realdir = os.path.abspath(os.path.dirname(realfile))
3268 realdir = os.path.abspath(os.path.dirname(realfile))
3270 path.insert(2, realdir)
3269 path.insert(2, realdir)
3271 if chgbindir != self._bindir:
3270 if chgbindir != self._bindir:
3272 path.insert(1, chgbindir)
3271 path.insert(1, chgbindir)
3273 if rhgbindir != self._bindir:
3272 if rhgbindir != self._bindir:
3274 path.insert(1, rhgbindir)
3273 path.insert(1, rhgbindir)
3275 if self._testdir != runtestdir:
3274 if self._testdir != runtestdir:
3276 path = [self._testdir] + path
3275 path = [self._testdir] + path
3277 path = [self._custom_bin_dir] + path
3276 path = [self._custom_bin_dir] + path
3278 osenvironb[b"PATH"] = sepb.join(path)
3277 osenvironb[b"PATH"] = sepb.join(path)
3279
3278
3280 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3279 # Include TESTDIR in PYTHONPATH so that out-of-tree extensions
3281 # can run .../tests/run-tests.py test-foo where test-foo
3280 # can run .../tests/run-tests.py test-foo where test-foo
3282 # adds an extension to HGRC. Also include the run-tests.py directory to
3281 # adds an extension to HGRC. Also include the run-tests.py directory to
3283 # import modules like heredoctest.
3282 # import modules like heredoctest.
3284 pypath = [self._pythondir, self._testdir, runtestdir]
3283 pypath = [self._pythondir, self._testdir, runtestdir]
3285 # We have to augment PYTHONPATH, rather than simply replacing
3284 # We have to augment PYTHONPATH, rather than simply replacing
3286 # it, in case external libraries are only available via current
3285 # it, in case external libraries are only available via current
3287 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3286 # PYTHONPATH. (In particular, the Subversion bindings on OS X
3288 # are in /opt/subversion.)
3287 # are in /opt/subversion.)
3289 oldpypath = osenvironb.get(IMPL_PATH)
3288 oldpypath = osenvironb.get(IMPL_PATH)
3290 if oldpypath:
3289 if oldpypath:
3291 pypath.append(oldpypath)
3290 pypath.append(oldpypath)
3292 osenvironb[IMPL_PATH] = sepb.join(pypath)
3291 osenvironb[IMPL_PATH] = sepb.join(pypath)
3293
3292
3294 if self.options.pure:
3293 if self.options.pure:
3295 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3294 os.environ["HGTEST_RUN_TESTS_PURE"] = "--pure"
3296 os.environ["HGMODULEPOLICY"] = "py"
3295 os.environ["HGMODULEPOLICY"] = "py"
3297 if self.options.rust:
3296 if self.options.rust:
3298 os.environ["HGMODULEPOLICY"] = "rust+c"
3297 os.environ["HGMODULEPOLICY"] = "rust+c"
3299 if self.options.no_rust:
3298 if self.options.no_rust:
3300 current_policy = os.environ.get("HGMODULEPOLICY", "")
3299 current_policy = os.environ.get("HGMODULEPOLICY", "")
3301 if current_policy.startswith("rust+"):
3300 if current_policy.startswith("rust+"):
3302 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3301 os.environ["HGMODULEPOLICY"] = current_policy[len("rust+") :]
3303 os.environ.pop("HGWITHRUSTEXT", None)
3302 os.environ.pop("HGWITHRUSTEXT", None)
3304
3303
3305 if self.options.allow_slow_tests:
3304 if self.options.allow_slow_tests:
3306 os.environ["HGTEST_SLOW"] = "slow"
3305 os.environ["HGTEST_SLOW"] = "slow"
3307 elif 'HGTEST_SLOW' in os.environ:
3306 elif 'HGTEST_SLOW' in os.environ:
3308 del os.environ['HGTEST_SLOW']
3307 del os.environ['HGTEST_SLOW']
3309
3308
3310 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3309 self._coveragefile = os.path.join(self._testdir, b'.coverage')
3311
3310
3312 if self.options.exceptions:
3311 if self.options.exceptions:
3313 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3312 exceptionsdir = os.path.join(self._outputdir, b'exceptions')
3314 try:
3313 try:
3315 os.makedirs(exceptionsdir)
3314 os.makedirs(exceptionsdir)
3316 except OSError as e:
3315 except OSError as e:
3317 if e.errno != errno.EEXIST:
3316 if e.errno != errno.EEXIST:
3318 raise
3317 raise
3319
3318
3320 # Remove all existing exception reports.
3319 # Remove all existing exception reports.
3321 for f in os.listdir(exceptionsdir):
3320 for f in os.listdir(exceptionsdir):
3322 os.unlink(os.path.join(exceptionsdir, f))
3321 os.unlink(os.path.join(exceptionsdir, f))
3323
3322
3324 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3323 osenvironb[b'HGEXCEPTIONSDIR'] = exceptionsdir
3325 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3324 logexceptions = os.path.join(self._testdir, b'logexceptions.py')
3326 self.options.extra_config_opt.append(
3325 self.options.extra_config_opt.append(
3327 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3326 'extensions.logexceptions=%s' % logexceptions.decode('utf-8')
3328 )
3327 )
3329
3328
3330 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3329 vlog("# Using TESTDIR", _bytes2sys(self._testdir))
3331 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3330 vlog("# Using RUNTESTDIR", _bytes2sys(osenvironb[b'RUNTESTDIR']))
3332 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3331 vlog("# Using HGTMP", _bytes2sys(self._hgtmp))
3333 vlog("# Using PATH", os.environ["PATH"])
3332 vlog("# Using PATH", os.environ["PATH"])
3334 vlog(
3333 vlog(
3335 "# Using",
3334 "# Using",
3336 _bytes2sys(IMPL_PATH),
3335 _bytes2sys(IMPL_PATH),
3337 _bytes2sys(osenvironb[IMPL_PATH]),
3336 _bytes2sys(osenvironb[IMPL_PATH]),
3338 )
3337 )
3339 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3338 vlog("# Writing to directory", _bytes2sys(self._outputdir))
3340
3339
3341 try:
3340 try:
3342 return self._runtests(testdescs) or 0
3341 return self._runtests(testdescs) or 0
3343 finally:
3342 finally:
3344 time.sleep(0.1)
3343 time.sleep(0.1)
3345 self._cleanup()
3344 self._cleanup()
3346
3345
3347 def findtests(self, args):
3346 def findtests(self, args):
3348 """Finds possible test files from arguments.
3347 """Finds possible test files from arguments.
3349
3348
3350 If you wish to inject custom tests into the test harness, this would
3349 If you wish to inject custom tests into the test harness, this would
3351 be a good function to monkeypatch or override in a derived class.
3350 be a good function to monkeypatch or override in a derived class.
3352 """
3351 """
3353 if not args:
3352 if not args:
3354 if self.options.changed:
3353 if self.options.changed:
3355 proc = Popen4(
3354 proc = Popen4(
3356 b'hg st --rev "%s" -man0 .'
3355 b'hg st --rev "%s" -man0 .'
3357 % _sys2bytes(self.options.changed),
3356 % _sys2bytes(self.options.changed),
3358 None,
3357 None,
3359 0,
3358 0,
3360 )
3359 )
3361 stdout, stderr = proc.communicate()
3360 stdout, stderr = proc.communicate()
3362 args = stdout.strip(b'\0').split(b'\0')
3361 args = stdout.strip(b'\0').split(b'\0')
3363 else:
3362 else:
3364 args = os.listdir(b'.')
3363 args = os.listdir(b'.')
3365
3364
3366 expanded_args = []
3365 expanded_args = []
3367 for arg in args:
3366 for arg in args:
3368 if os.path.isdir(arg):
3367 if os.path.isdir(arg):
3369 if not arg.endswith(b'/'):
3368 if not arg.endswith(b'/'):
3370 arg += b'/'
3369 arg += b'/'
3371 expanded_args.extend([arg + a for a in os.listdir(arg)])
3370 expanded_args.extend([arg + a for a in os.listdir(arg)])
3372 else:
3371 else:
3373 expanded_args.append(arg)
3372 expanded_args.append(arg)
3374 args = expanded_args
3373 args = expanded_args
3375
3374
3376 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3375 testcasepattern = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
3377 tests = []
3376 tests = []
3378 for t in args:
3377 for t in args:
3379 case = []
3378 case = []
3380
3379
3381 if not (
3380 if not (
3382 os.path.basename(t).startswith(b'test-')
3381 os.path.basename(t).startswith(b'test-')
3383 and (t.endswith(b'.py') or t.endswith(b'.t'))
3382 and (t.endswith(b'.py') or t.endswith(b'.t'))
3384 ):
3383 ):
3385
3384
3386 m = testcasepattern.match(os.path.basename(t))
3385 m = testcasepattern.match(os.path.basename(t))
3387 if m is not None:
3386 if m is not None:
3388 t_basename, casestr = m.groups()
3387 t_basename, casestr = m.groups()
3389 t = os.path.join(os.path.dirname(t), t_basename)
3388 t = os.path.join(os.path.dirname(t), t_basename)
3390 if casestr:
3389 if casestr:
3391 case = casestr.split(b'#')
3390 case = casestr.split(b'#')
3392 else:
3391 else:
3393 continue
3392 continue
3394
3393
3395 if t.endswith(b'.t'):
3394 if t.endswith(b'.t'):
3396 # .t file may contain multiple test cases
3395 # .t file may contain multiple test cases
3397 casedimensions = parsettestcases(t)
3396 casedimensions = parsettestcases(t)
3398 if casedimensions:
3397 if casedimensions:
3399 cases = []
3398 cases = []
3400
3399
3401 def addcases(case, casedimensions):
3400 def addcases(case, casedimensions):
3402 if not casedimensions:
3401 if not casedimensions:
3403 cases.append(case)
3402 cases.append(case)
3404 else:
3403 else:
3405 for c in casedimensions[0]:
3404 for c in casedimensions[0]:
3406 addcases(case + [c], casedimensions[1:])
3405 addcases(case + [c], casedimensions[1:])
3407
3406
3408 addcases([], casedimensions)
3407 addcases([], casedimensions)
3409 if case and case in cases:
3408 if case and case in cases:
3410 cases = [case]
3409 cases = [case]
3411 elif case:
3410 elif case:
3412 # Ignore invalid cases
3411 # Ignore invalid cases
3413 cases = []
3412 cases = []
3414 else:
3413 else:
3415 pass
3414 pass
3416 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3415 tests += [{'path': t, 'case': c} for c in sorted(cases)]
3417 else:
3416 else:
3418 tests.append({'path': t})
3417 tests.append({'path': t})
3419 else:
3418 else:
3420 tests.append({'path': t})
3419 tests.append({'path': t})
3421
3420
3422 if self.options.retest:
3421 if self.options.retest:
3423 retest_args = []
3422 retest_args = []
3424 for test in tests:
3423 for test in tests:
3425 errpath = self._geterrpath(test)
3424 errpath = self._geterrpath(test)
3426 if os.path.exists(errpath):
3425 if os.path.exists(errpath):
3427 retest_args.append(test)
3426 retest_args.append(test)
3428 tests = retest_args
3427 tests = retest_args
3429 return tests
3428 return tests
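For reference, the `name#case` syntax handled above can be checked in isolation; the test name and case labels below are made up:

import re

pat = re.compile(br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-.#]+))')
m = pat.match(b'test-foo.t#flat#v2')
assert m is not None
assert m.group(1) == b'test-foo.t'
assert m.group(2).split(b'#') == [b'flat', b'v2']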
3430
3429
3431 def _runtests(self, testdescs):
3430 def _runtests(self, testdescs):
3432 def _reloadtest(test, i):
3431 def _reloadtest(test, i):
3433 # convert a test back to its description dict
3432 # convert a test back to its description dict
3434 desc = {'path': test.path}
3433 desc = {'path': test.path}
3435 case = getattr(test, '_case', [])
3434 case = getattr(test, '_case', [])
3436 if case:
3435 if case:
3437 desc['case'] = case
3436 desc['case'] = case
3438 return self._gettest(desc, i)
3437 return self._gettest(desc, i)
3439
3438
3440 try:
3439 try:
3441 if self.options.restart:
3440 if self.options.restart:
3442 orig = list(testdescs)
3441 orig = list(testdescs)
3443 while testdescs:
3442 while testdescs:
3444 desc = testdescs[0]
3443 desc = testdescs[0]
3445 errpath = self._geterrpath(desc)
3444 errpath = self._geterrpath(desc)
3446 if os.path.exists(errpath):
3445 if os.path.exists(errpath):
3447 break
3446 break
3448 testdescs.pop(0)
3447 testdescs.pop(0)
3449 if not testdescs:
3448 if not testdescs:
3450 print("running all tests")
3449 print("running all tests")
3451 testdescs = orig
3450 testdescs = orig
3452
3451
3453 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3452 tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
3454 num_tests = len(tests) * self.options.runs_per_test
3453 num_tests = len(tests) * self.options.runs_per_test
3455
3454
3456 jobs = min(num_tests, self.options.jobs)
3455 jobs = min(num_tests, self.options.jobs)
3457
3456
3458 failed = False
3457 failed = False
3459 kws = self.options.keywords
3458 kws = self.options.keywords
3460 if kws is not None and PYTHON3:
3459 if kws is not None and PYTHON3:
3461 kws = kws.encode('utf-8')
3460 kws = kws.encode('utf-8')
3462
3461
3463 suite = TestSuite(
3462 suite = TestSuite(
3464 self._testdir,
3463 self._testdir,
3465 jobs=jobs,
3464 jobs=jobs,
3466 whitelist=self.options.whitelisted,
3465 whitelist=self.options.whitelisted,
3467 blacklist=self.options.blacklist,
3466 blacklist=self.options.blacklist,
3468 keywords=kws,
3467 keywords=kws,
3469 loop=self.options.loop,
3468 loop=self.options.loop,
3470 runs_per_test=self.options.runs_per_test,
3469 runs_per_test=self.options.runs_per_test,
3471 showchannels=self.options.showchannels,
3470 showchannels=self.options.showchannels,
3472 tests=tests,
3471 tests=tests,
3473 loadtest=_reloadtest,
3472 loadtest=_reloadtest,
3474 )
3473 )
3475 verbosity = 1
3474 verbosity = 1
3476 if self.options.list_tests:
3475 if self.options.list_tests:
3477 verbosity = 0
3476 verbosity = 0
3478 elif self.options.verbose:
3477 elif self.options.verbose:
3479 verbosity = 2
3478 verbosity = 2
3480 runner = TextTestRunner(self, verbosity=verbosity)
3479 runner = TextTestRunner(self, verbosity=verbosity)
3481
3480
3482 if self.options.list_tests:
3481 if self.options.list_tests:
3483 result = runner.listtests(suite)
3482 result = runner.listtests(suite)
3484 else:
3483 else:
3485 self._usecorrectpython()
3484 self._usecorrectpython()
3486 if self._installdir:
3485 if self._installdir:
3487 self._installhg()
3486 self._installhg()
3488 self._checkhglib("Testing")
3487 self._checkhglib("Testing")
3489 if self.options.chg:
3488 if self.options.chg:
3490 assert self._installdir
3489 assert self._installdir
3491 self._installchg()
3490 self._installchg()
3492 if self.options.rhg:
3491 if self.options.rhg:
3493 assert self._installdir
3492 assert self._installdir
3494 self._installrhg()
3493 self._installrhg()
3495 elif self.options.pyoxidized:
3494 elif self.options.pyoxidized:
3496 self._build_pyoxidized()
3495 self._build_pyoxidized()
3497 self._use_correct_mercurial()
3496 self._use_correct_mercurial()
3498
3497
3499 log(
3498 log(
3500 'running %d tests using %d parallel processes'
3499 'running %d tests using %d parallel processes'
3501 % (num_tests, jobs)
3500 % (num_tests, jobs)
3502 )
3501 )
3503
3502
3504 result = runner.run(suite)
3503 result = runner.run(suite)
3505
3504
3506 if result.failures or result.errors:
3505 if result.failures or result.errors:
3507 failed = True
3506 failed = True
3508
3507
3509 result.onEnd()
3508 result.onEnd()
3510
3509
3511 if self.options.anycoverage:
3510 if self.options.anycoverage:
3512 self._outputcoverage()
3511 self._outputcoverage()
3513 except KeyboardInterrupt:
3512 except KeyboardInterrupt:
3514 failed = True
3513 failed = True
3515 print("\ninterrupted!")
3514 print("\ninterrupted!")
3516
3515
3517 if failed:
3516 if failed:
3518 return 1
3517 return 1
3519
3518
3520 def _geterrpath(self, test):
3519 def _geterrpath(self, test):
3521 # test['path'] is a relative path
3520 # test['path'] is a relative path
3522 if 'case' in test:
3521 if 'case' in test:
3523 # for multiple dimensions test cases
3522 # for multiple dimensions test cases
3524 casestr = b'#'.join(test['case'])
3523 casestr = b'#'.join(test['case'])
3525 errpath = b'%s#%s.err' % (test['path'], casestr)
3524 errpath = b'%s#%s.err' % (test['path'], casestr)
3526 else:
3525 else:
3527 errpath = b'%s.err' % test['path']
3526 errpath = b'%s.err' % test['path']
3528 if self.options.outputdir:
3527 if self.options.outputdir:
3529 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3528 self._outputdir = canonpath(_sys2bytes(self.options.outputdir))
3530 errpath = os.path.join(self._outputdir, errpath)
3529 errpath = os.path.join(self._outputdir, errpath)
3531 return errpath
3530 return errpath
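A worked example of the .err naming scheme implemented above (the file names are invented): a test addressed as test-foo.t#flat#v2 leaves its output in test-foo.t#flat#v2.err.

case = [b'flat', b'v2']
errpath = b'%s#%s.err' % (b'test-foo.t', b'#'.join(case))
assert errpath == b'test-foo.t#flat#v2.err'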
3532
3531
3533 def _getport(self, count):
3532 def _getport(self, count):
3534 port = self._ports.get(count) # do we have a cached entry?
3533 port = self._ports.get(count) # do we have a cached entry?
3535 if port is None:
3534 if port is None:
3536 portneeded = 3
3535 portneeded = 3
3536 # after 100 tries we just give up and let the test report the failure
3535 # after 100 tries we just give up and let the test report the failure
3538 for tries in xrange(100):
3537 for tries in xrange(100):
3539 allfree = True
3538 allfree = True
3540 port = self.options.port + self._portoffset
3539 port = self.options.port + self._portoffset
3541 for idx in xrange(portneeded):
3540 for idx in xrange(portneeded):
3542 if not checkportisavailable(port + idx):
3541 if not checkportisavailable(port + idx):
3543 allfree = False
3542 allfree = False
3544 break
3543 break
3545 self._portoffset += portneeded
3544 self._portoffset += portneeded
3546 if allfree:
3545 if allfree:
3547 break
3546 break
3548 self._ports[count] = port
3547 self._ports[count] = port
3549 return port
3548 return port
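The loop above reserves three consecutive ports per test slot. A simplified stand-in for the availability check it relies on (the real checkportisavailable in the harness is more careful; this sketch only probes IPv4 on localhost):

import socket

def three_consecutive_ports_free(start, host='localhost'):
    for offset in range(3):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind((host, start + offset))
            except OSError:
                return False
    return True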
3550
3549
3551 def _gettest(self, testdesc, count):
3550 def _gettest(self, testdesc, count):
3552 """Obtain a Test by looking at its filename.
3551 """Obtain a Test by looking at its filename.
3553
3552
3554 Returns a Test instance. The Test may not be runnable if it doesn't
3553 Returns a Test instance. The Test may not be runnable if it doesn't
3555 map to a known type.
3554 map to a known type.
3556 """
3555 """
3557 path = testdesc['path']
3556 path = testdesc['path']
3558 lctest = path.lower()
3557 lctest = path.lower()
3559 testcls = Test
3558 testcls = Test
3560
3559
3561 for ext, cls in self.TESTTYPES:
3560 for ext, cls in self.TESTTYPES:
3562 if lctest.endswith(ext):
3561 if lctest.endswith(ext):
3563 testcls = cls
3562 testcls = cls
3564 break
3563 break
3565
3564
3566 refpath = os.path.join(getcwdb(), path)
3565 refpath = os.path.join(getcwdb(), path)
3567 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3566 tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
3568
3567
3569 # extra keyword parameters. 'case' is used by .t tests
3568 # extra keyword parameters. 'case' is used by .t tests
3570 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3569 kwds = {k: testdesc[k] for k in ['case'] if k in testdesc}
3571
3570
3572 t = testcls(
3571 t = testcls(
3573 refpath,
3572 refpath,
3574 self._outputdir,
3573 self._outputdir,
3575 tmpdir,
3574 tmpdir,
3576 keeptmpdir=self.options.keep_tmpdir,
3575 keeptmpdir=self.options.keep_tmpdir,
3577 debug=self.options.debug,
3576 debug=self.options.debug,
3578 first=self.options.first,
3577 first=self.options.first,
3579 timeout=self.options.timeout,
3578 timeout=self.options.timeout,
3580 startport=self._getport(count),
3579 startport=self._getport(count),
3581 extraconfigopts=self.options.extra_config_opt,
3580 extraconfigopts=self.options.extra_config_opt,
3582 shell=self.options.shell,
3581 shell=self.options.shell,
3583 hgcommand=self._hgcommand,
3582 hgcommand=self._hgcommand,
3584 usechg=bool(self.options.with_chg or self.options.chg),
3583 usechg=bool(self.options.with_chg or self.options.chg),
3585 chgdebug=self.options.chg_debug,
3584 chgdebug=self.options.chg_debug,
3586 useipv6=useipv6,
3585 useipv6=useipv6,
3587 **kwds
3586 **kwds
3588 )
3587 )
3589 t.should_reload = True
3588 t.should_reload = True
3590 return t
3589 return t
3591
3590
3592 def _cleanup(self):
3591 def _cleanup(self):
3593 """Clean up state from this test invocation."""
3592 """Clean up state from this test invocation."""
3594 if self.options.keep_tmpdir:
3593 if self.options.keep_tmpdir:
3595 return
3594 return
3596
3595
3597 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3596 vlog("# Cleaning up HGTMP", _bytes2sys(self._hgtmp))
3598 shutil.rmtree(self._hgtmp, True)
3597 shutil.rmtree(self._hgtmp, True)
3599 for f in self._createdfiles:
3598 for f in self._createdfiles:
3600 try:
3599 try:
3601 os.remove(f)
3600 os.remove(f)
3602 except OSError:
3601 except OSError:
3603 pass
3602 pass
3604
3603
3605 def _usecorrectpython(self):
3604 def _usecorrectpython(self):
3606 """Configure the environment to use the appropriate Python in tests."""
3605 """Configure the environment to use the appropriate Python in tests."""
3607 # Tests must use the same interpreter as us or bad things will happen.
3606 # Tests must use the same interpreter as us or bad things will happen.
3608 if WINDOWS and PYTHON3:
3607 if WINDOWS and PYTHON3:
3609 pyexe_names = [b'python', b'python3', b'python.exe']
3608 pyexe_names = [b'python', b'python3', b'python.exe']
3610 elif WINDOWS:
3609 elif WINDOWS:
3611 pyexe_names = [b'python', b'python.exe']
3610 pyexe_names = [b'python', b'python.exe']
3612 elif PYTHON3:
3611 elif PYTHON3:
3613 pyexe_names = [b'python', b'python3']
3612 pyexe_names = [b'python', b'python3']
3614 else:
3613 else:
3615 pyexe_names = [b'python', b'python2']
3614 pyexe_names = [b'python', b'python2']
3616
3615
3617 # os.symlink() is available with py3 on Windows, but it requires
3616 # os.symlink() is available with py3 on Windows, but it requires
3618 # Administrator rights.
3617 # Administrator rights.
3619 if not WINDOWS and getattr(os, 'symlink', None):
3618 if not WINDOWS and getattr(os, 'symlink', None):
3620 msg = "# Making python executable in test path a symlink to '%s'"
3619 msg = "# Making python executable in test path a symlink to '%s'"
3621 msg %= sysexecutable
3620 msg %= sysexecutable
3622 vlog(msg)
3621 vlog(msg)
3623 for pyexename in pyexe_names:
3622 for pyexename in pyexe_names:
3624 mypython = os.path.join(self._custom_bin_dir, pyexename)
3623 mypython = os.path.join(self._custom_bin_dir, pyexename)
3625 try:
3624 try:
3626 if os.readlink(mypython) == sysexecutable:
3625 if os.readlink(mypython) == sysexecutable:
3627 continue
3626 continue
3628 os.unlink(mypython)
3627 os.unlink(mypython)
3629 except OSError as err:
3628 except OSError as err:
3630 if err.errno != errno.ENOENT:
3629 if err.errno != errno.ENOENT:
3631 raise
3630 raise
3632 if self._findprogram(pyexename) != sysexecutable:
3631 if self._findprogram(pyexename) != sysexecutable:
3633 try:
3632 try:
3634 os.symlink(sysexecutable, mypython)
3633 os.symlink(sysexecutable, mypython)
3635 self._createdfiles.append(mypython)
3634 self._createdfiles.append(mypython)
3636 except OSError as err:
3635 except OSError as err:
3637 # child processes may race, which is harmless
3636 # child processes may race, which is harmless
3638 if err.errno != errno.EEXIST:
3637 if err.errno != errno.EEXIST:
3639 raise
3638 raise
3640 elif WINDOWS and not os.getenv('MSYSTEM'):
3639 elif WINDOWS and not os.getenv('MSYSTEM'):
3641 raise AssertionError('cannot run test on Windows without MSYSTEM')
3640 raise AssertionError('cannot run test on Windows without MSYSTEM')
3642 else:
3641 else:
3643 # Generate explicit file instead of symlink
3642 # Generate explicit file instead of symlink
3644 #
3643 #
3645 # This is especially important as Windows doesn't have
3644 # This is especially important as Windows doesn't have
3646 # `python3.exe`, and MSYS cannot understand the reparse point with
3645 # `python3.exe`, and MSYS cannot understand the reparse point with
3647 # that name provided by Microsoft. Create a simple script on PATH
3646 # that name provided by Microsoft. Create a simple script on PATH
3648 # with that name that delegates to the py3 launcher so the shebang
3647 # with that name that delegates to the py3 launcher so the shebang
3649 # lines work.
3648 # lines work.
3650 esc_executable = _sys2bytes(shellquote(sysexecutable))
3649 esc_executable = _sys2bytes(shellquote(sysexecutable))
3651 for pyexename in pyexe_names:
3650 for pyexename in pyexe_names:
3652 stub_exec_path = os.path.join(self._custom_bin_dir, pyexename)
3651 stub_exec_path = os.path.join(self._custom_bin_dir, pyexename)
3653 with open(stub_exec_path, 'wb') as f:
3652 with open(stub_exec_path, 'wb') as f:
3654 f.write(b'#!/bin/sh\n')
3653 f.write(b'#!/bin/sh\n')
3655 f.write(b'%s "$@"\n' % esc_executable)
3654 f.write(b'%s "$@"\n' % esc_executable)
3656
3655
3657 if WINDOWS:
3656 if WINDOWS:
3658 if not PYTHON3:
3657 if not PYTHON3:
3659 # let's try to build a valid python3 executable for the
3658 # let's try to build a valid python3 executable for the
3660 # script that requires it.
3659 # script that requires it.
3661 py3exe_name = os.path.join(self._custom_bin_dir, b'python3')
3660 py3exe_name = os.path.join(self._custom_bin_dir, b'python3')
3662 with open(py3exe_name, 'wb') as f:
3661 with open(py3exe_name, 'wb') as f:
3663 f.write(b'#!/bin/sh\n')
3662 f.write(b'#!/bin/sh\n')
3664 f.write(b'py -3 "$@"\n')
3663 f.write(b'py -3 "$@"\n')
3665
3664
3666 # adjust the path to make sure the main python finds its own dll
3665 # adjust the path to make sure the main python finds its own dll
3667 path = os.environ['PATH'].split(os.pathsep)
3666 path = os.environ['PATH'].split(os.pathsep)
3668 main_exec_dir = os.path.dirname(sysexecutable)
3667 main_exec_dir = os.path.dirname(sysexecutable)
3669 extra_paths = [_bytes2sys(self._custom_bin_dir), main_exec_dir]
3668 extra_paths = [_bytes2sys(self._custom_bin_dir), main_exec_dir]
3670
3669
3671 # Binaries installed by pip into the user area like pylint.exe may
3670 # Binaries installed by pip into the user area like pylint.exe may
3672 # not be in PATH by default.
3671 # not be in PATH by default.
3673 appdata = os.environ.get('APPDATA')
3672 appdata = os.environ.get('APPDATA')
3674 vi = sys.version_info
3673 vi = sys.version_info
3675 if appdata is not None:
3674 if appdata is not None:
3676 python_dir = 'Python%d%d' % (vi[0], vi[1])
3675 python_dir = 'Python%d%d' % (vi[0], vi[1])
3677 scripts_path = [appdata, 'Python', python_dir, 'Scripts']
3676 scripts_path = [appdata, 'Python', python_dir, 'Scripts']
3678 if not PYTHON3:
3677 if not PYTHON3:
3679 scripts_path = [appdata, 'Python', 'Scripts']
3678 scripts_path = [appdata, 'Python', 'Scripts']
3680 scripts_dir = os.path.join(*scripts_path)
3679 scripts_dir = os.path.join(*scripts_path)
3681 extra_paths.append(scripts_dir)
3680 extra_paths.append(scripts_dir)
3682
3681
3683 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3682 os.environ['PATH'] = os.pathsep.join(extra_paths + path)
3684
3683
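    # Illustrative sketch only (not part of run-tests.py): assuming the test
    # interpreter is /usr/bin/python3, each stub written above would contain
    # exactly:
    #
    #   #!/bin/sh
    #   /usr/bin/python3 "$@"
    #
    # so `#!/usr/bin/env python3` shebangs in test helpers resolve to the same
    # interpreter that is running run-tests.py itself.
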
    def _use_correct_mercurial(self):
        target_exec = os.path.join(self._custom_bin_dir, b'hg')
        if self._hgcommand != b'hg':
            # shutil.which only accepts bytes from 3.8
            real_exec = which(self._hgcommand)
            if real_exec is None:
                raise ValueError(
                    'could not find exec path for "%s"' % self._hgcommand
                )
            if real_exec == target_exec:
                # do not overwrite something with itself
                return
            if WINDOWS:
                with open(target_exec, 'wb') as f:
                    f.write(b'#!/bin/sh\n')
                    escaped_exec = shellquote(_bytes2sys(real_exec))
                    f.write(b'%s "$@"\n' % _sys2bytes(escaped_exec))
            else:
                os.symlink(real_exec, target_exec)
            self._createdfiles.append(target_exec)

    def _installhg(self):
        """Install hg into the test environment.

        This will also configure hg with the appropriate testing settings.
        """
        vlog("# Performing temporary installation of HG")
        installerrs = os.path.join(self._hgtmp, b"install.err")
        compiler = ''
        if self.options.compiler:
            compiler = '--compiler ' + self.options.compiler
        setup_opts = b""
        if self.options.pure:
            setup_opts = b"--pure"
        elif self.options.rust:
            setup_opts = b"--rust"
        elif self.options.no_rust:
            setup_opts = b"--no-rust"

        # Run installer in hg root
        script = os.path.realpath(sys.argv[0])
        exe = sysexecutable
        if PYTHON3:
            compiler = _sys2bytes(compiler)
            script = _sys2bytes(script)
            exe = _sys2bytes(exe)
        hgroot = os.path.dirname(os.path.dirname(script))
        self._hgroot = hgroot
        os.chdir(hgroot)
        nohome = b'--home=""'
        if WINDOWS:
            # The --home="" trick works only on OSes where os.sep == '/'
            # because of a distutils convert_path() fast-path. Avoid it at
            # least on Windows for now, and deal with .pydistutils.cfg bugs
            # when they happen.
            nohome = b''
        cmd = (
            b'"%(exe)s" setup.py %(setup_opts)s clean --all'
            b' build %(compiler)s --build-base="%(base)s"'
            b' install --force --prefix="%(prefix)s"'
            b' --install-lib="%(libdir)s"'
            b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1'
            % {
                b'exe': exe,
                b'setup_opts': setup_opts,
                b'compiler': compiler,
                b'base': os.path.join(self._hgtmp, b"build"),
                b'prefix': self._installdir,
                b'libdir': self._pythondir,
                b'bindir': self._bindir,
                b'nohome': nohome,
                b'logfile': installerrs,
            }
        )

        # setuptools requires install directories to exist.
        def makedirs(p):
            try:
                os.makedirs(p)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

        makedirs(self._pythondir)
        makedirs(self._bindir)

        vlog("# Running", cmd.decode("utf-8"))
        if subprocess.call(_bytes2sys(cmd), shell=True) == 0:
            if not self.options.verbose:
                try:
                    os.remove(installerrs)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
        else:
            with open(installerrs, 'rb') as f:
                for line in f:
                    if PYTHON3:
                        sys.stdout.buffer.write(line)
                    else:
                        sys.stdout.write(line)
            sys.exit(1)
        os.chdir(self._testdir)

        hgbat = os.path.join(self._bindir, b'hg.bat')
        if os.path.isfile(hgbat):
            # hg.bat expects to be put in bin/scripts while the run-tests.py
            # installation layout puts it in bin/ directly. Fix it.
            with open(hgbat, 'rb') as f:
                data = f.read()
            if br'"%~dp0..\python" "%~dp0hg" %*' in data:
                data = data.replace(
                    br'"%~dp0..\python" "%~dp0hg" %*',
                    b'"%~dp0python" "%~dp0hg" %*',
                )
                with open(hgbat, 'wb') as f:
                    f.write(data)
            else:
                print('WARNING: cannot fix hg.bat reference to python.exe')

        if self.options.anycoverage:
            custom = os.path.join(
                osenvironb[b'RUNTESTDIR'], b'sitecustomize.py'
            )
            target = os.path.join(self._pythondir, b'sitecustomize.py')
            vlog('# Installing coverage trigger to %s' % target)
            shutil.copyfile(custom, target)
            rc = os.path.join(self._testdir, b'.coveragerc')
            vlog('# Installing coverage rc to %s' % rc)
            osenvironb[b'COVERAGE_PROCESS_START'] = rc
            covdir = os.path.join(self._installdir, b'..', b'coverage')
            try:
                os.mkdir(covdir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            osenvironb[b'COVERAGE_DIR'] = covdir

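    # A minimal sketch of the install command built above, with hypothetical
    # temporary paths filled in (illustrative only, not taken from an actual
    # run):
    #
    #   "/usr/bin/python3" setup.py --rust clean --all \
    #       build --build-base="/tmp/hgtests.XXXX/build" \
    #       install --force --prefix="/tmp/hgtests.XXXX/install" \
    #       --install-lib="/tmp/hgtests.XXXX/install/lib/python" \
    #       --install-scripts="/tmp/hgtests.XXXX/install/bin" --home="" \
    #       >/tmp/hgtests.XXXX/install.err 2>&1
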
    def _checkhglib(self, verb):
        """Ensure that the 'mercurial' package imported by python is
        the one we expect it to be. If not, print a warning to stderr."""
        if self._pythondir_inferred:
            # The pythondir has been inferred from --with-hg flag.
            # We cannot expect anything sensible here.
            return
        expecthg = os.path.join(self._pythondir, b'mercurial')
        actualhg = self._gethgpath()
        if os.path.abspath(actualhg) != os.path.abspath(expecthg):
            sys.stderr.write(
                'warning: %s with unexpected mercurial lib: %s\n'
                ' (expected %s)\n' % (verb, actualhg, expecthg)
            )

    def _gethgpath(self):
        """Return the path to the mercurial package that is actually found by
        the current Python interpreter."""
        if self._hgpath is not None:
            return self._hgpath

        cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
        cmd = cmd % PYTHON
        if PYTHON3:
            cmd = _bytes2sys(cmd)

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        out, err = p.communicate()

        self._hgpath = out.strip()

        return self._hgpath

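    # Illustrative only: with a hypothetical interpreter path, the probe above
    # amounts to running
    #
    #   "/usr/bin/python3" -c "import mercurial; print (mercurial.__path__[0])"
    #
    # in a shell and keeping the single path it prints, stripped of the
    # trailing newline.
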
    def _installchg(self):
        """Install chg into the test environment"""
        vlog('# Performing temporary installation of CHG')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = b'"%(make)s" clean install PREFIX="%(prefix)s"' % {
            b'make': b'make',  # TODO: switch by option or environment?
            b'prefix': self._installdir,
        }
        cwd = os.path.join(self._hgroot, b'contrib', b'chg')
        vlog("# Running", cmd)
        proc = subprocess.Popen(
            cmd,
            shell=True,
            cwd=cwd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _err = proc.communicate()
        if proc.returncode != 0:
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)

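    # Sketch of the resulting invocation, with a hypothetical install prefix
    # (illustrative only): run from <hgroot>/contrib/chg, it executes
    #
    #   "make" clean install PREFIX="/tmp/hgtests.XXXX/install"
    #
    # so the chg binary is built and placed under the same prefix as the
    # freshly installed hg.
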
    def _installrhg(self):
        """Install rhg into the test environment"""
        vlog('# Performing temporary installation of rhg')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = b'"%(make)s" install-rhg PREFIX="%(prefix)s"' % {
            b'make': b'make',  # TODO: switch by option or environment?
            b'prefix': self._installdir,
        }
        cwd = self._hgroot
        vlog("# Running", cmd)
        proc = subprocess.Popen(
            cmd,
            shell=True,
            cwd=cwd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _err = proc.communicate()
        if proc.returncode != 0:
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)

    def _build_pyoxidized(self):
        """build a pyoxidized version of mercurial into the test environment

        Ideally this function would be `install_pyoxidizer` and would both
        build and install pyoxidizer. However we are starting small to get the
        pyoxidizer-built binary into testing quickly.
        """
        vlog('# build a pyoxidized version of Mercurial')
        assert os.path.dirname(self._bindir) == self._installdir
        assert self._hgroot, 'must be called after _installhg()'
        cmd = b'"%(make)s" pyoxidizer-windows-tests' % {
            b'make': b'make',
        }
        cwd = self._hgroot
        vlog("# Running", cmd)
        proc = subprocess.Popen(
            _bytes2sys(cmd),
            shell=True,
            cwd=_bytes2sys(cwd),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _err = proc.communicate()
        if proc.returncode != 0:
            if PYTHON3:
                sys.stdout.buffer.write(out)
            else:
                sys.stdout.write(out)
            sys.exit(1)

    def _outputcoverage(self):
        """Produce code coverage output."""
        import coverage

        coverage = coverage.coverage

        vlog('# Producing coverage report')
        # chdir is the easiest way to get short, relative paths in the
        # output.
        os.chdir(self._hgroot)
        covdir = os.path.join(_bytes2sys(self._installdir), '..', 'coverage')
        cov = coverage(data_file=os.path.join(covdir, 'cov'))

        # Map install directory paths back to source directory.
        cov.config.paths['srcdir'] = ['.', _bytes2sys(self._pythondir)]

        cov.combine()

        omit = [
            _bytes2sys(os.path.join(x, b'*'))
            for x in [self._bindir, self._testdir]
        ]
        cov.report(ignore_errors=True, omit=omit)

        if self.options.htmlcov:
            htmldir = os.path.join(_bytes2sys(self._outputdir), 'htmlcov')
            cov.html_report(directory=htmldir, omit=omit)
        if self.options.annotate:
            adir = os.path.join(_bytes2sys(self._outputdir), 'annotated')
            if not os.path.isdir(adir):
                os.mkdir(adir)
            cov.annotate(directory=adir, omit=omit)

    def _findprogram(self, program):
        """Search PATH for an executable program"""
        dpb = _sys2bytes(os.defpath)
        sepb = _sys2bytes(os.pathsep)
        for p in osenvironb.get(b'PATH', dpb).split(sepb):
            name = os.path.join(p, program)
            if WINDOWS or os.access(name, os.X_OK):
                return _bytes2sys(name)
        return None

    def _checktools(self):
        """Ensure tools required to run tests are present."""
        for p in self.REQUIREDTOOLS:
            if WINDOWS and not p.endswith(b'.exe'):
                p += b'.exe'
            found = self._findprogram(p)
            p = p.decode("utf-8")
            if found:
                vlog("# Found prerequisite", p, "at", found)
            else:
                print("WARNING: Did not find prerequisite tool: %s " % p)


def aggregateexceptions(path):
    exceptioncounts = collections.Counter()
    testsbyfailure = collections.defaultdict(set)
    failuresbytest = collections.defaultdict(set)

    for f in os.listdir(path):
        with open(os.path.join(path, f), 'rb') as fh:
            data = fh.read().split(b'\0')
            if len(data) != 5:
                continue

            exc, mainframe, hgframe, hgline, testname = data
            exc = exc.decode('utf-8')
            mainframe = mainframe.decode('utf-8')
            hgframe = hgframe.decode('utf-8')
            hgline = hgline.decode('utf-8')
            testname = testname.decode('utf-8')

            key = (hgframe, hgline, exc)
            exceptioncounts[key] += 1
            testsbyfailure[key].add(testname)
            failuresbytest[testname].add(key)

    # Find test having fewest failures for each failure.
    leastfailing = {}
    for key, tests in testsbyfailure.items():
        fewesttest = None
        fewestcount = 99999999
        for test in sorted(tests):
            if len(failuresbytest[test]) < fewestcount:
                fewesttest = test
                fewestcount = len(failuresbytest[test])

        leastfailing[key] = (fewestcount, fewesttest)

    # Create a combined counter so we can sort by total occurrences and
    # impacted tests.
    combined = {}
    for key in exceptioncounts:
        combined[key] = (
            exceptioncounts[key],
            len(testsbyfailure[key]),
            leastfailing[key][0],
            leastfailing[key][1],
        )

    return {
        'exceptioncounts': exceptioncounts,
        'total': sum(exceptioncounts.values()),
        'combined': combined,
        'leastfailing': leastfailing,
        'byfailure': testsbyfailure,
        'bytest': failuresbytest,
    }


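# A minimal usage sketch (illustrative only; `exceptions_dir` is a
# hypothetical directory holding the '\0'-separated records parsed above):
#
#   report = aggregateexceptions(exceptions_dir)
#   for key, (count, ntests, _mincount, mintest) in sorted(
#       report['combined'].items(), key=lambda kv: kv[1], reverse=True
#   ):
#       frame, line, exc = key
#       print('%d occurrences in %d tests (e.g. %s): %s'
#             % (count, ntests, mintest, exc))
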
if __name__ == '__main__':
    if WINDOWS and not os.getenv('MSYSTEM'):
        print('cannot run test on Windows without MSYSTEM', file=sys.stderr)
        print(
            '(if you need to do so contact the mercurial devs: '
            'mercurial@mercurial-scm.org)',
            file=sys.stderr,
        )
        sys.exit(255)

    runner = TestRunner()

    try:
        import msvcrt

        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
    except ImportError:
        pass

    sys.exit(runner.run(sys.argv[1:]))