From 4d5052203d200474b1a9aacbb0d59666a576ee16 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 09:47:43 +0300 Subject: [PATCH 1/8] Split libtest into several smaller modules --- src/libtest/bench.rs | 251 +++++ src/libtest/cli.rs | 384 ++++++++ src/libtest/formatters/json.rs | 4 +- src/libtest/formatters/mod.rs | 2 +- src/libtest/formatters/pretty.rs | 10 +- src/libtest/formatters/terse.rs | 4 +- src/libtest/helpers/concurrency.rs | 153 +++ src/libtest/helpers/isatty.rs | 33 + src/libtest/helpers/metrics.rs | 50 + src/libtest/helpers/mod.rs | 6 + src/libtest/lib.rs | 1417 +--------------------------- src/libtest/options.rs | 80 ++ src/libtest/test_result.rs | 102 ++ src/libtest/time.rs | 206 ++++ src/libtest/types.rs | 145 +++ 15 files changed, 1462 insertions(+), 1385 deletions(-) create mode 100644 src/libtest/bench.rs create mode 100644 src/libtest/cli.rs create mode 100644 src/libtest/helpers/concurrency.rs create mode 100644 src/libtest/helpers/isatty.rs create mode 100644 src/libtest/helpers/metrics.rs create mode 100644 src/libtest/helpers/mod.rs create mode 100644 src/libtest/options.rs create mode 100644 src/libtest/test_result.rs create mode 100644 src/libtest/time.rs create mode 100644 src/libtest/types.rs diff --git a/src/libtest/bench.rs b/src/libtest/bench.rs new file mode 100644 index 0000000000000..055a74f691cd4 --- /dev/null +++ b/src/libtest/bench.rs @@ -0,0 +1,251 @@ +//! Benchmarking module. +use super::{ + BenchMode, MonitorMsg, Sender, Sink, TestDesc, TestResult +}; + +use crate::stats; +use std::time::{Duration, Instant}; +use std::cmp; +use std::io; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::sync::{Arc, Mutex}; +use std::hint::black_box; + +/// Manager of the benchmarking runs. +/// +/// This is fed into functions marked with `#[bench]` to allow for +/// set-up & tear-down before running a piece of code repeatedly via a +/// call to `iter`. +#[derive(Clone)] +pub struct Bencher { + mode: BenchMode, + summary: Option, + pub bytes: u64, +} + +impl Bencher { + /// Callback for benchmark functions to run in their body. 
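For orientation, a hedged sketch of how a `#[bench]` function typically drives the `iter` callback below (illustrative only, not part of this patch; it assumes the usual `#![feature(test)]` / `extern crate test` setup so that `Bencher` is in scope):

    // The closure handed to `iter` is the code that gets timed repeatedly.
    // Setting `bytes` additionally makes the harness report throughput in MB/s.
    #[bench]
    fn bench_sum(b: &mut Bencher) {
        let data: Vec<u64> = (0..1024).collect();
        b.bytes = (data.len() * std::mem::size_of::<u64>()) as u64;
        b.iter(|| data.iter().sum::<u64>());
    }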
+ pub fn iter(&mut self, mut inner: F) + where + F: FnMut() -> T, + { + if self.mode == BenchMode::Single { + ns_iter_inner(&mut inner, 1); + return; + } + + self.summary = Some(iter(&mut inner)); + } + + pub fn bench(&mut self, mut f: F) -> Option + where + F: FnMut(&mut Bencher), + { + f(self); + return self.summary; + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct BenchSamples { + pub ns_iter_summ: stats::Summary, + pub mb_s: usize, +} + +pub fn fmt_bench_samples(bs: &BenchSamples) -> String { + use std::fmt::Write; + let mut output = String::new(); + + let median = bs.ns_iter_summ.median as usize; + let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; + + output + .write_fmt(format_args!( + "{:>11} ns/iter (+/- {})", + fmt_thousands_sep(median, ','), + fmt_thousands_sep(deviation, ',') + )) + .unwrap(); + if bs.mb_s != 0 { + output + .write_fmt(format_args!(" = {} MB/s", bs.mb_s)) + .unwrap(); + } + output +} + +// Format a number with thousands separators +fn fmt_thousands_sep(mut n: usize, sep: char) -> String { + use std::fmt::Write; + let mut output = String::new(); + let mut trailing = false; + for &pow in &[9, 6, 3, 0] { + let base = 10_usize.pow(pow); + if pow == 0 || trailing || n / base != 0 { + if !trailing { + output.write_fmt(format_args!("{}", n / base)).unwrap(); + } else { + output.write_fmt(format_args!("{:03}", n / base)).unwrap(); + } + if pow != 0 { + output.push(sep); + } + trailing = true; + } + n %= base; + } + + output +} + +fn ns_from_dur(dur: Duration) -> u64 { + dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64) +} + +fn ns_iter_inner(inner: &mut F, k: u64) -> u64 +where + F: FnMut() -> T, +{ + let start = Instant::now(); + for _ in 0..k { + black_box(inner()); + } + return ns_from_dur(start.elapsed()); +} + +pub fn iter(inner: &mut F) -> stats::Summary +where + F: FnMut() -> T, +{ + // Initial bench run to get ballpark figure. + let ns_single = ns_iter_inner(inner, 1); + + // Try to estimate iter count for 1ms falling back to 1m + // iterations if first run took < 1ns. + let ns_target_total = 1_000_000; // 1ms + let mut n = ns_target_total / cmp::max(1, ns_single); + + // if the first run took more than 1ms we don't want to just + // be left doing 0 iterations on every loop. The unfortunate + // side effect of not being able to do as many runs is + // automatically handled by the statistical analysis below + // (i.e., larger error bars). + n = cmp::max(1, n); + + let mut total_run = Duration::new(0, 0); + let samples: &mut [f64] = &mut [0.0_f64; 50]; + loop { + let loop_start = Instant::now(); + + for p in &mut *samples { + *p = ns_iter_inner(inner, n) as f64 / n as f64; + } + + stats::winsorize(samples, 5.0); + let summ = stats::Summary::new(samples); + + for p in &mut *samples { + let ns = ns_iter_inner(inner, 5 * n); + *p = ns as f64 / (5 * n) as f64; + } + + stats::winsorize(samples, 5.0); + let summ5 = stats::Summary::new(samples); + + let loop_run = loop_start.elapsed(); + + // If we've run for 100ms and seem to have converged to a + // stable median. + if loop_run > Duration::from_millis(100) + && summ.median_abs_dev_pct < 1.0 + && summ.median - summ5.median < summ5.median_abs_dev + { + return summ5; + } + + total_run = total_run + loop_run; + // Longest we ever run for is 3s. + if total_run > Duration::from_secs(3) { + return summ5; + } + + // If we overflow here just return the results so far. 
We check a + // multiplier of 10 because we're about to multiply by 2 and the + // next iteration of the loop will also multiply by 5 (to calculate + // the summ5 result) + n = match n.checked_mul(10) { + Some(_) => n * 2, + None => { + return summ5; + } + }; + } +} + +pub fn benchmark(desc: TestDesc, monitor_ch: Sender, nocapture: bool, f: F) +where + F: FnMut(&mut Bencher), +{ + let mut bs = Bencher { + mode: BenchMode::Auto, + summary: None, + bytes: 0, + }; + + let data = Arc::new(Mutex::new(Vec::new())); + let oldio = if !nocapture { + Some(( + io::set_print(Some(Box::new(Sink(data.clone())))), + io::set_panic(Some(Box::new(Sink(data.clone())))), + )) + } else { + None + }; + + let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f))); + + if let Some((printio, panicio)) = oldio { + io::set_print(printio); + io::set_panic(panicio); + } + + let test_result = match result { + //bs.bench(f) { + Ok(Some(ns_iter_summ)) => { + let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); + let mb_s = bs.bytes * 1000 / ns_iter; + + let bs = BenchSamples { + ns_iter_summ, + mb_s: mb_s as usize, + }; + TestResult::TrBench(bs) + } + Ok(None) => { + // iter not called, so no data. + // FIXME: error in this case? + let samples: &mut [f64] = &mut [0.0_f64; 1]; + let bs = BenchSamples { + ns_iter_summ: stats::Summary::new(samples), + mb_s: 0, + }; + TestResult::TrBench(bs) + } + Err(_) => TestResult::TrFailed, + }; + + let stdout = data.lock().unwrap().to_vec(); + monitor_ch.send((desc, test_result, None, stdout)).unwrap(); +} + +pub fn run_once(f: F) +where + F: FnMut(&mut Bencher), +{ + let mut bs = Bencher { + mode: BenchMode::Single, + summary: None, + bytes: 0, + }; + bs.bench(f); +} diff --git a/src/libtest/cli.rs b/src/libtest/cli.rs new file mode 100644 index 0000000000000..b35193701d6ef --- /dev/null +++ b/src/libtest/cli.rs @@ -0,0 +1,384 @@ +//! Module converting command-line arguments into test configuration. + +use std::env; +use std::path::PathBuf; +use getopts; + +use super::options::{RunIgnored, ColorConfig, OutputFormat, Options}; +use super::time::TestTimeOptions; +use super::helpers::isatty; + +#[derive(Debug)] +pub struct TestOpts { + pub list: bool, + pub filter: Option, + pub filter_exact: bool, + pub exclude_should_panic: bool, + pub run_ignored: RunIgnored, + pub run_tests: bool, + pub bench_benchmarks: bool, + pub logfile: Option, + pub nocapture: bool, + pub color: ColorConfig, + pub format: OutputFormat, + pub test_threads: Option, + pub skip: Vec, + pub time_options: Option, + pub options: Options, +} + +impl TestOpts { + pub fn use_color(&self) -> bool { + match self.color { + ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(), + ColorConfig::AlwaysColor => true, + ColorConfig::NeverColor => false, + } + } +} + +/// Result of parsing the options. +pub type OptRes = Result; +/// Result of parsing the option part. 
+type OptPartRes = Result, String>; + +fn optgroups() -> getopts::Options { + let mut opts = getopts::Options::new(); + opts.optflag("", "include-ignored", "Run ignored and not ignored tests") + .optflag("", "ignored", "Run only ignored tests") + .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic") + .optflag("", "test", "Run tests and not benchmarks") + .optflag("", "bench", "Run benchmarks instead of tests") + .optflag("", "list", "List all tests and benchmarks") + .optflag("h", "help", "Display this message (longer with --help)") + .optopt( + "", + "logfile", + "Write logs to the specified file instead \ + of stdout", + "PATH", + ) + .optflag( + "", + "nocapture", + "don't capture stdout/stderr of each \ + task, allow printing directly", + ) + .optopt( + "", + "test-threads", + "Number of threads used for running tests \ + in parallel", + "n_threads", + ) + .optmulti( + "", + "skip", + "Skip tests whose names contain FILTER (this flag can \ + be used multiple times)", + "FILTER", + ) + .optflag( + "q", + "quiet", + "Display one character per test instead of one line. \ + Alias to --format=terse", + ) + .optflag( + "", + "exact", + "Exactly match filters rather than by substring", + ) + .optopt( + "", + "color", + "Configure coloring of output: + auto = colorize if stdout is a tty and tests are run on serially (default); + always = always colorize output; + never = never colorize output;", + "auto|always|never", + ) + .optopt( + "", + "format", + "Configure formatting of output: + pretty = Print verbose output; + terse = Display one character per test; + json = Output a json document", + "pretty|terse|json", + ) + .optflag( + "", + "show-output", + "Show captured stdout of successful tests" + ) + .optopt( + "Z", + "", + "Enable nightly-only flags: + unstable-options = Allow use of experimental features", + "unstable-options", + ) + .optflagopt( + "", + "report-time", + "Show execution time of each test. Awailable values: + plain = do not colorize the execution time (default); + colored = colorize output according to the `color` parameter value; + + Threshold values for colorized output can be configured via + `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and + `RUST_TEST_TIME_DOCTEST` environment variables. + + Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. + + Not available for --format=terse", + "plain|colored" + ) + .optflag( + "", + "ensure-time", + "Treat excess of the test execution time limit as error. + + Threshold values for this option can be configured via + `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and + `RUST_TEST_TIME_DOCTEST` environment variables. + + Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. + + `CRITICAL_TIME` here means the limit that should not be exceeded by test. + " + ); + return opts; +} + +fn usage(binary: &str, options: &getopts::Options) { + let message = format!("Usage: {} [OPTIONS] [FILTER]", binary); + println!( + r#"{usage} + +The FILTER string is tested against the name of all tests, and only those +tests whose names contain the filter are run. + +By default, all tests are run in parallel. This can be altered with the +--test-threads flag or the RUST_TEST_THREADS environment variable when running +tests (set it to 1). + +All tests have their standard output and standard error captured by default. +This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE +environment variable to a value other than "0". 
Logging is not captured by default. + +Test Attributes: + + `#[test]` - Indicates a function is a test to be run. This function + takes no arguments. + `#[bench]` - Indicates a function is a benchmark to be run. This + function takes one argument (test::Bencher). + `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if + the code causes a panic (an assertion failure or panic!) + A message may be provided, which the failure string must + contain: #[should_panic(expected = "foo")]. + `#[ignore]` - When applied to a function which is already attributed as a + test, then the test runner will ignore these tests during + normal test runs. Running with --ignored or --include-ignored will run + these tests."#, + usage = options.usage(&message) + ); +} + +// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566 +fn is_nightly() -> bool { + // Whether this is a feature-staged build, i.e., on the beta or stable channel + let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); + // Whether we should enable unstable features for bootstrapping + let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); + + bootstrap || !disable_unstable_features +} + +// Gets the option value and checks if unstable features are enabled. +macro_rules! unstable_optflag { + ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{ + let opt = $matches.opt_present($option_name); + if !$allow_unstable && opt { + return Some(Err(format!( + "The \"{}\" flag is only accepted on the nightly compiler", + $option_name + ))); + } + + opt + }}; +} + +// Gets the CLI options assotiated with `report-time` feature. +fn get_time_options( + matches: &getopts::Matches, + allow_unstable: bool) +-> Option> { + let report_time = unstable_optflag!(matches, allow_unstable, "report-time"); + let colored_opt_str = matches.opt_str("report-time"); + let mut report_time_colored = report_time && colored_opt_str == Some("colored".into()); + let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time"); + + // If `ensure-test-time` option is provided, time output is enforced, + // so user won't be confused if any of tests will silently fail. 
+ let options = if report_time || ensure_test_time { + if ensure_test_time && !report_time { + report_time_colored = true; + } + Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored)) + } else { + None + }; + + Some(Ok(options)) +} + +// Parses command line arguments into test options +pub fn parse_opts(args: &[String]) -> Option { + let mut allow_unstable = false; + let opts = optgroups(); + let args = args.get(1..).unwrap_or(args); + let matches = match opts.parse(args) { + Ok(m) => m, + Err(f) => return Some(Err(f.to_string())), + }; + + if let Some(opt) = matches.opt_str("Z") { + if !is_nightly() { + return Some(Err( + "the option `Z` is only accepted on the nightly compiler".into(), + )); + } + + match &*opt { + "unstable-options" => { + allow_unstable = true; + } + _ => { + return Some(Err("Unrecognized option to `Z`".into())); + } + } + }; + + if matches.opt_present("h") { + usage(&args[0], &opts); + return None; + } + + let filter = if !matches.free.is_empty() { + Some(matches.free[0].clone()) + } else { + None + }; + + let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic"); + + let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored"); + + let run_ignored = match (include_ignored, matches.opt_present("ignored")) { + (true, true) => { + return Some(Err( + "the options --include-ignored and --ignored are mutually exclusive".into(), + )); + } + (true, false) => RunIgnored::Yes, + (false, true) => RunIgnored::Only, + (false, false) => RunIgnored::No, + }; + let quiet = matches.opt_present("quiet"); + let exact = matches.opt_present("exact"); + let list = matches.opt_present("list"); + + let logfile = matches.opt_str("logfile"); + let logfile = logfile.map(|s| PathBuf::from(&s)); + + let bench_benchmarks = matches.opt_present("bench"); + let run_tests = !bench_benchmarks || matches.opt_present("test"); + + let mut nocapture = matches.opt_present("nocapture"); + if !nocapture { + nocapture = match env::var("RUST_TEST_NOCAPTURE") { + Ok(val) => &val != "0", + Err(_) => false, + }; + } + + let time_options = match get_time_options(&matches, allow_unstable) { + Some(Ok(val)) => val, + Some(Err(e)) => return Some(Err(e)), + None => panic!("Unexpected output from `get_time_options`"), + }; + + let test_threads = match matches.opt_str("test-threads") { + Some(n_str) => match n_str.parse::() { + Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())), + Ok(n) => Some(n), + Err(e) => { + return Some(Err(format!( + "argument for --test-threads must be a number > 0 \ + (error: {})", + e + ))); + } + }, + None => None, + }; + + let color = match matches.opt_str("color").as_ref().map(|s| &**s) { + Some("auto") | None => ColorConfig::AutoColor, + Some("always") => ColorConfig::AlwaysColor, + Some("never") => ColorConfig::NeverColor, + + Some(v) => { + return Some(Err(format!( + "argument for --color must be auto, always, or never (was \ + {})", + v + ))); + } + }; + + let format = match matches.opt_str("format").as_ref().map(|s| &**s) { + None if quiet => OutputFormat::Terse, + Some("pretty") | None => OutputFormat::Pretty, + Some("terse") => OutputFormat::Terse, + Some("json") => { + if !allow_unstable { + return Some(Err( + "The \"json\" format is only accepted on the nightly compiler".into(), + )); + } + OutputFormat::Json + } + + Some(v) => { + return Some(Err(format!( + "argument for --format must be pretty, terse, or json (was \ + {})", + v + ))); + } + }; + + let test_opts = 
TestOpts { + list, + filter, + filter_exact: exact, + exclude_should_panic, + run_ignored, + run_tests, + bench_benchmarks, + logfile, + nocapture, + color, + format, + test_threads, + skip: matches.opt_strs("skip"), + time_options, + options: Options::new().display_output(matches.opt_present("show-output")), + }; + + Some(Ok(test_opts)) +} diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs index dcd733620bf90..ff756c456dae6 100644 --- a/src/libtest/formatters/json.rs +++ b/src/libtest/formatters/json.rs @@ -27,7 +27,7 @@ impl JsonFormatter { ty: &str, name: &str, evt: &str, - exec_time: Option<&TestExecTime>, + exec_time: Option<&time::TestExecTime>, stdout: Option>, extra: Option<&str>, ) -> io::Result<()> { @@ -76,7 +76,7 @@ impl OutputFormatter for JsonFormatter { &mut self, desc: &TestDesc, result: &TestResult, - exec_time: Option<&TestExecTime>, + exec_time: Option<&time::TestExecTime>, stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()> { diff --git a/src/libtest/formatters/mod.rs b/src/libtest/formatters/mod.rs index dd202fb3ab6fa..72432cd8e3c2b 100644 --- a/src/libtest/formatters/mod.rs +++ b/src/libtest/formatters/mod.rs @@ -16,7 +16,7 @@ pub(crate) trait OutputFormatter { &mut self, desc: &TestDesc, result: &TestResult, - exec_time: Option<&TestExecTime>, + exec_time: Option<&time::TestExecTime>, stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()>; diff --git a/src/libtest/formatters/pretty.rs b/src/libtest/formatters/pretty.rs index 2935b4c99cec4..84e1a44dab807 100644 --- a/src/libtest/formatters/pretty.rs +++ b/src/libtest/formatters/pretty.rs @@ -3,7 +3,7 @@ use super::*; pub(crate) struct PrettyFormatter { out: OutputLocation, use_color: bool, - time_options: Option, + time_options: Option, /// Number of columns to fill when aligning names max_name_len: usize, @@ -17,7 +17,7 @@ impl PrettyFormatter { use_color: bool, max_name_len: usize, is_multithreaded: bool, - time_options: Option, + time_options: Option, ) -> Self { PrettyFormatter { out, @@ -93,7 +93,7 @@ impl PrettyFormatter { fn write_time( &mut self, desc: &TestDesc, - exec_time: Option<&TestExecTime> + exec_time: Option<&time::TestExecTime> ) -> io::Result<()> { if let (Some(opts), Some(time)) = (self.time_options, exec_time) { let time_str = format!(" <{}>", time); @@ -194,7 +194,7 @@ impl OutputFormatter for PrettyFormatter { &mut self, desc: &TestDesc, result: &TestResult, - exec_time: Option<&TestExecTime>, + exec_time: Option<&time::TestExecTime>, _: &[u8], _: &ConsoleTestState, ) -> io::Result<()> { @@ -225,7 +225,7 @@ impl OutputFormatter for PrettyFormatter { self.write_plain(&format!( "test {} has been running for over {} seconds\n", - desc.name, TEST_WARN_TIMEOUT_S + desc.name, time::TEST_WARN_TIMEOUT_S )) } diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs index 8914e7b6b5685..50407d1130f86 100644 --- a/src/libtest/formatters/terse.rs +++ b/src/libtest/formatters/terse.rs @@ -174,7 +174,7 @@ impl OutputFormatter for TerseFormatter { &mut self, desc: &TestDesc, result: &TestResult, - _: Option<&TestExecTime>, + _: Option<&time::TestExecTime>, _: &[u8], _: &ConsoleTestState, ) -> io::Result<()> { @@ -196,7 +196,7 @@ impl OutputFormatter for TerseFormatter { fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { self.write_plain(&format!( "test {} has been running for over {} seconds\n", - desc.name, TEST_WARN_TIMEOUT_S + desc.name, time::TEST_WARN_TIMEOUT_S )) } diff --git a/src/libtest/helpers/concurrency.rs 
b/src/libtest/helpers/concurrency.rs new file mode 100644 index 0000000000000..f0292c2d2c792 --- /dev/null +++ b/src/libtest/helpers/concurrency.rs @@ -0,0 +1,153 @@ +//! Helper module which helps to determine amount of threads to be used +//! during tests execution. +use std::env; + +#[cfg(any(unix, target_os = "cloudabi"))] +use libc; + +#[allow(deprecated)] +pub fn get_concurrency() -> usize { + return match env::var("RUST_TEST_THREADS") { + Ok(s) => { + let opt_n: Option = s.parse().ok(); + match opt_n { + Some(n) if n > 0 => n, + _ => panic!( + "RUST_TEST_THREADS is `{}`, should be a positive integer.", + s + ), + } + } + Err(..) => num_cpus(), + }; + + #[cfg(windows)] + #[allow(nonstandard_style)] + fn num_cpus() -> usize { + #[repr(C)] + struct SYSTEM_INFO { + wProcessorArchitecture: u16, + wReserved: u16, + dwPageSize: u32, + lpMinimumApplicationAddress: *mut u8, + lpMaximumApplicationAddress: *mut u8, + dwActiveProcessorMask: *mut u8, + dwNumberOfProcessors: u32, + dwProcessorType: u32, + dwAllocationGranularity: u32, + wProcessorLevel: u16, + wProcessorRevision: u16, + } + extern "system" { + fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32; + } + unsafe { + let mut sysinfo = std::mem::zeroed(); + GetSystemInfo(&mut sysinfo); + sysinfo.dwNumberOfProcessors as usize + } + } + + #[cfg(target_os = "vxworks")] + fn num_cpus() -> usize { + // FIXME: Implement num_cpus on vxWorks + 1 + } + + #[cfg(target_os = "redox")] + fn num_cpus() -> usize { + // FIXME: Implement num_cpus on Redox + 1 + } + + #[cfg(any( + all(target_arch = "wasm32", not(target_os = "emscripten")), + all(target_vendor = "fortanix", target_env = "sgx") + ))] + fn num_cpus() -> usize { + 1 + } + + #[cfg(any( + target_os = "android", + target_os = "cloudabi", + target_os = "emscripten", + target_os = "fuchsia", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "solaris", + ))] + fn num_cpus() -> usize { + unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize } + } + + #[cfg(any( + target_os = "freebsd", + target_os = "dragonfly", + target_os = "netbsd" + ))] + fn num_cpus() -> usize { + use std::ptr; + + let mut cpus: libc::c_uint = 0; + let mut cpus_size = std::mem::size_of_val(&cpus); + + unsafe { + cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint; + } + if cpus < 1 { + let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; + unsafe { + libc::sysctl( + mib.as_mut_ptr(), + 2, + &mut cpus as *mut _ as *mut _, + &mut cpus_size as *mut _ as *mut _, + ptr::null_mut(), + 0, + ); + } + if cpus < 1 { + cpus = 1; + } + } + cpus as usize + } + + #[cfg(target_os = "openbsd")] + fn num_cpus() -> usize { + use std::ptr; + + let mut cpus: libc::c_uint = 0; + let mut cpus_size = std::mem::size_of_val(&cpus); + let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; + + unsafe { + libc::sysctl( + mib.as_mut_ptr(), + 2, + &mut cpus as *mut _ as *mut _, + &mut cpus_size as *mut _ as *mut _, + ptr::null_mut(), + 0, + ); + } + if cpus < 1 { + cpus = 1; + } + cpus as usize + } + + #[cfg(target_os = "haiku")] + fn num_cpus() -> usize { + // FIXME: implement + 1 + } + + #[cfg(target_os = "l4re")] + fn num_cpus() -> usize { + // FIXME: implement + 1 + } +} diff --git a/src/libtest/helpers/isatty.rs b/src/libtest/helpers/isatty.rs new file mode 100644 index 0000000000000..638328aea18cf --- /dev/null +++ b/src/libtest/helpers/isatty.rs @@ -0,0 +1,33 @@ +//! Helper module which provides a function to test +//! if stdout is a tty. 
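As a quick illustration of the `get_concurrency` helper moved into `helpers/concurrency.rs` above, this is the observable behavior implied by the code (a sketch, not part of the patch):

    // RUST_TEST_THREADS, when set to a positive integer, wins outright.
    std::env::set_var("RUST_TEST_THREADS", "4");
    assert_eq!(get_concurrency(), 4);

    // When unset, the platform-specific num_cpus() fallback is used.
    std::env::remove_var("RUST_TEST_THREADS");
    assert!(get_concurrency() >= 1);

    // "0" or a non-numeric value panics with a descriptive message.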
+ +#[cfg(any( + target_os = "cloudabi", + all(target_arch = "wasm32", not(target_os = "emscripten")), + all(target_vendor = "fortanix", target_env = "sgx") +))] +pub fn stdout_isatty() -> bool { + // FIXME: Implement isatty on SGX + false +} +#[cfg(unix)] +pub fn stdout_isatty() -> bool { + unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 } +} +#[cfg(windows)] +pub fn stdout_isatty() -> bool { + type DWORD = u32; + type BOOL = i32; + type HANDLE = *mut u8; + type LPDWORD = *mut u32; + const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD; + extern "system" { + fn GetStdHandle(which: DWORD) -> HANDLE; + fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL; + } + unsafe { + let handle = GetStdHandle(STD_OUTPUT_HANDLE); + let mut out = 0; + GetConsoleMode(handle, &mut out) != 0 + } +} \ No newline at end of file diff --git a/src/libtest/helpers/metrics.rs b/src/libtest/helpers/metrics.rs new file mode 100644 index 0000000000000..f77a23e6875b2 --- /dev/null +++ b/src/libtest/helpers/metrics.rs @@ -0,0 +1,50 @@ +//! Benchmark metrics. +use std::collections::BTreeMap; + +#[derive(Clone, PartialEq, Debug, Copy)] +pub struct Metric { + value: f64, + noise: f64, +} + +impl Metric { + pub fn new(value: f64, noise: f64) -> Metric { + Metric { value, noise } + } +} + +#[derive(Clone, PartialEq)] +pub struct MetricMap(BTreeMap); + +impl MetricMap { + pub fn new() -> MetricMap { + MetricMap(BTreeMap::new()) + } + + /// Insert a named `value` (+/- `noise`) metric into the map. The value + /// must be non-negative. The `noise` indicates the uncertainty of the + /// metric, which doubles as the "noise range" of acceptable + /// pairwise-regressions on this named value, when comparing from one + /// metric to the next using `compare_to_old`. + /// + /// If `noise` is positive, then it means this metric is of a value + /// you want to see grow smaller, so a change larger than `noise` in the + /// positive direction represents a regression. + /// + /// If `noise` is negative, then it means this metric is of a value + /// you want to see grow larger, so a change larger than `noise` in the + /// negative direction represents a regression. + pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { + let m = Metric { value, noise }; + self.0.insert(name.to_owned(), m); + } + + pub fn fmt_metrics(&self) -> String { + let v = self + .0 + .iter() + .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise)) + .collect::>(); + v.join(", ") + } +} diff --git a/src/libtest/helpers/mod.rs b/src/libtest/helpers/mod.rs new file mode 100644 index 0000000000000..0bbe77b1c50af --- /dev/null +++ b/src/libtest/helpers/mod.rs @@ -0,0 +1,6 @@ +//! Module with common helpers not directly related to tests +//! but used in `libtest`. 
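A minimal usage sketch for the `MetricMap` that now lives in `helpers/metrics.rs` above (illustrative only; the expected string follows the `fmt_metrics` format shown there):

    let mut metrics = MetricMap::new();
    metrics.insert_metric("throughput", 120.0, 5.0);
    metrics.insert_metric("latency", 3.5, 0.5);
    // Entries are stored in a BTreeMap, so the rendering is sorted by name:
    assert_eq!(metrics.fmt_metrics(), "latency: 3.5 (+/- 0.5), throughput: 120 (+/- 5)");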
+ +pub mod concurrency; +pub mod isatty; +pub mod metrics; diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index 4c3cbeb4acced..f79994671114f 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -30,33 +30,21 @@ #![feature(termination_trait_lib)] #![feature(test)] -use getopts; -#[cfg(any(unix, target_os = "cloudabi"))] -extern crate libc; use term; pub use self::ColorConfig::*; -use self::NamePadding::*; use self::OutputLocation::*; use self::TestEvent::*; -pub use self::TestFn::*; -pub use self::TestName::*; -pub use self::TestResult::*; +pub use self::types::TestName::*; -use std::any::Any; use std::borrow::Cow; -use std::cmp; -use std::collections::BTreeMap; use std::env; -use std::fmt; use std::fs::File; use std::io; use std::io::prelude::*; use std::panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}; -use std::path::PathBuf; use std::process; use std::process::{ExitStatus, Command, Termination}; -use std::str::FromStr; use std::sync::mpsc::{channel, Sender}; use std::sync::{Arc, Mutex}; use std::thread; @@ -65,276 +53,48 @@ use std::time::{Duration, Instant}; #[cfg(test)] mod tests; -const TEST_WARN_TIMEOUT_S: u64 = 60; const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE"; -// Return codes for secondary process. -// Start somewhere other than 0 so we know the return code means what we think -// it means. -const TR_OK: i32 = 50; -const TR_FAILED: i32 = 51; - -/// This small module contains constants used by `report-time` option. -/// Those constants values will be used if corresponding environment variables are not set. -/// -/// To override values for unit-tests, use a constant `RUST_TEST_TIME_UNIT`, -/// To override values for integration tests, use a constant `RUST_TEST_TIME_INTEGRATION`, -/// To override values for doctests, use a constant `RUST_TEST_TIME_DOCTEST`. -/// -/// Example of the expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 means -/// warn time, and 200 means critical time. -pub mod time_constants { - use std::time::Duration; - use super::TEST_WARN_TIMEOUT_S; - - /// Environment variable for overriding default threshold for unit-tests. - pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT"; - - // Unit tests are supposed to be really quick. - pub const UNIT_WARN: Duration = Duration::from_millis(50); - pub const UNIT_CRITICAL: Duration = Duration::from_millis(100); - - /// Environment variable for overriding default threshold for unit-tests. - pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION"; - - // Integration tests may have a lot of work, so they can take longer to execute. - pub const INTEGRATION_WARN: Duration = Duration::from_millis(500); - pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000); - - /// Environment variable for overriding default threshold for unit-tests. - pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST"; - - // Doctests are similar to integration tests, because they can include a lot of - // initialization code. - pub const DOCTEST_WARN: Duration = INTEGRATION_WARN; - pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL; - - // Do not suppose anything about unknown tests, base limits on the - // `TEST_WARN_TIMEOUT_S` constant. 
- pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S); - pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2); -} - // to be used by rustc to compile tests in libtest pub mod test { pub use crate::{ - assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static, - Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, RunStrategy, - ShouldPanic, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, - TestOpts, TestTimeOptions, TestType, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk, + bench::Bencher, + cli::{parse_opts, TestOpts}, + helpers::metrics::{Metric, MetricMap}, + options::{ShouldPanic, Options, RunIgnored, RunStrategy}, + test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk}, + time::TestTimeOptions, + types::{ + DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, + TestName, TestType, + }, + assert_test_result, filter_tests, run_test, test_main, test_main_static, }; } -mod formatters; -pub mod stats; - -use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}; - -/// Whether to execute tests concurrently or not -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Concurrent { - Yes, - No, -} - -/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html) -/// conventions. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum TestType { - /// Unit-tests are expected to be in the `src` folder of the crate. - UnitTest, - /// Integration-style tests are expected to be in the `tests` folder of the crate. - IntegrationTest, - /// Doctests are created by the `librustdoc` manually, so it's a different type of test. - DocTest, - /// Tests for the sources that don't follow the project layout convention - /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly). - Unknown, -} - -// The name of a test. By convention this follows the rules for rust -// paths; i.e., it should be a series of identifiers separated by double -// colons. This way if some test runner wants to arrange the tests -// hierarchically it may. 
- -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum TestName { - StaticTestName(&'static str), - DynTestName(String), - AlignedTestName(Cow<'static, str>, NamePadding), -} -impl TestName { - fn as_slice(&self) -> &str { - match *self { - StaticTestName(s) => s, - DynTestName(ref s) => s, - AlignedTestName(ref s, _) => &*s, - } - } - - fn padding(&self) -> NamePadding { - match self { - &AlignedTestName(_, p) => p, - _ => PadNone, - } - } - - fn with_padding(&self, padding: NamePadding) -> TestName { - let name = match self { - &TestName::StaticTestName(name) => Cow::Borrowed(name), - &TestName::DynTestName(ref name) => Cow::Owned(name.clone()), - &TestName::AlignedTestName(ref name, _) => name.clone(), - }; - - TestName::AlignedTestName(name, padding) - } -} -impl fmt::Display for TestName { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self.as_slice(), f) - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum NamePadding { - PadNone, - PadOnRight, -} - -impl TestDesc { - fn padded_name(&self, column_count: usize, align: NamePadding) -> String { - let mut name = String::from(self.name.as_slice()); - let fill = column_count.saturating_sub(name.len()); - let pad = " ".repeat(fill); - match align { - PadNone => name, - PadOnRight => { - name.push_str(&pad); - name - } - } - } -} - -/// Represents a benchmark function. -pub trait TDynBenchFn: Send { - fn run(&self, harness: &mut Bencher); -} - -// A function that runs a test. If the function returns successfully, -// the test succeeds; if the function panics then the test fails. We -// may need to come up with a more clever definition of test in order -// to support isolation of tests into threads. -pub enum TestFn { - StaticTestFn(fn()), - StaticBenchFn(fn(&mut Bencher)), - DynTestFn(Box), - DynBenchFn(Box), -} - -impl TestFn { - fn padding(&self) -> NamePadding { - match *self { - StaticTestFn(..) => PadNone, - StaticBenchFn(..) => PadOnRight, - DynTestFn(..) => PadNone, - DynBenchFn(..) => PadOnRight, - } - } -} - -impl fmt::Debug for TestFn { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(match *self { - StaticTestFn(..) => "StaticTestFn(..)", - StaticBenchFn(..) => "StaticBenchFn(..)", - DynTestFn(..) => "DynTestFn(..)", - DynBenchFn(..) => "DynBenchFn(..)", - }) - } -} - -/// Manager of the benchmarking runs. -/// -/// This is fed into functions marked with `#[bench]` to allow for -/// set-up & tear-down before running a piece of code repeatedly via a -/// call to `iter`. -#[derive(Clone)] -pub struct Bencher { - mode: BenchMode, - summary: Option, - pub bytes: u64, -} - -#[derive(Clone, PartialEq, Eq)] -pub enum BenchMode { - Auto, - Single, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum ShouldPanic { - No, - Yes, - YesWithMessage(&'static str), -} - -// The definition of a single test. A test runner will run a list of -// these. 
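To make the relationship between these types concrete, here is a hedged sketch of assembling one such test by hand (illustration only; the name and closure are made up, but the fields match the `TestDesc`, `TestName`, and `TestFn` definitions in this file):

    let test = TestDescAndFn {
        desc: TestDesc {
            // By convention the name is a `::`-separated path, as noted above.
            name: DynTestName(String::from("mymod::smoke_test")),
            ignore: false,
            should_panic: ShouldPanic::No,
            allow_fail: false,
            test_type: TestType::UnitTest,
        },
        testfn: DynTestFn(Box::new(|| assert_eq!(2 + 2, 4))),
    };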
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct TestDesc { - pub name: TestName, - pub ignore: bool, - pub should_panic: ShouldPanic, - pub allow_fail: bool, - pub test_type: TestType, -} - -#[derive(Debug)] -pub struct TestDescAndFn { - pub desc: TestDesc, - pub testfn: TestFn, -} - -#[derive(Clone, PartialEq, Debug, Copy)] -pub struct Metric { - value: f64, - noise: f64, -} - -impl Metric { - pub fn new(value: f64, noise: f64) -> Metric { - Metric { value, noise } - } -} +use bench::*; +use test_result::*; +use types::*; +use options::*; +use cli::*; -/// In case we want to add other options as well, just add them in this struct. -#[derive(Copy, Clone, Debug)] -pub struct Options { - display_output: bool, - panic_abort: bool, -} +use helpers::concurrency::get_concurrency; +use helpers::metrics::MetricMap; -impl Options { - pub fn new() -> Options { - Options { - display_output: false, - panic_abort: false, - } - } +mod formatters; +pub mod stats; - pub fn display_output(mut self, display_output: bool) -> Options { - self.display_output = display_output; - self - } +mod cli; +mod helpers; +mod time; +mod types; +mod options; +mod bench; +mod test_result; - pub fn panic_abort(mut self, panic_abort: bool) -> Options { - self.panic_abort = panic_abort; - self - } -} +use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}; // The default console test runner. It accepts the command line // arguments and a vector of test_descs. @@ -440,556 +200,6 @@ pub fn assert_test_result(result: T) { ); } -#[derive(Copy, Clone, Debug)] -pub enum ColorConfig { - AutoColor, - AlwaysColor, - NeverColor, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum OutputFormat { - Pretty, - Terse, - Json, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum RunIgnored { - Yes, - No, - Only, -} - -/// Structure denoting time limits for test execution. -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] -pub struct TimeThreshold { - pub warn: Duration, - pub critical: Duration, -} - -impl TimeThreshold { - /// Creates a new `TimeThreshold` instance with provided durations. - pub fn new(warn: Duration, critical: Duration) -> Self { - Self { - warn, - critical, - } - } - - /// Attempts to create a `TimeThreshold` instance with values obtained - /// from the environment variable, and returns `None` if the variable - /// is not set. - /// Environment variable format is expected to match `\d+,\d+`. - /// - /// # Panics - /// - /// Panics if variable with provided name is set but contains inappropriate - /// value. - pub fn from_env_var(env_var_name: &str) -> Option { - let durations_str = env::var(env_var_name).ok()?; - - // Split string into 2 substrings by comma and try to parse numbers. - let mut durations = durations_str - .splitn(2, ',') - .map(|v| { - u64::from_str(v).unwrap_or_else(|_| { - panic!( - "Duration value in variable {} is expected to be a number, but got {}", - env_var_name, v - ) - }) - }); - - // Callback to be called if the environment variable has unexpected structure. 
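For reference, a sketch of how the environment-variable parsing above behaves (illustrative only; values are milliseconds in `warn,critical` order, matching the format documented for the `RUST_TEST_TIME_*` variables):

    std::env::set_var("RUST_TEST_TIME_UNIT", "50,100");
    let threshold = TimeThreshold::from_env_var("RUST_TEST_TIME_UNIT").unwrap();
    assert_eq!(threshold.warn, Duration::from_millis(50));
    assert_eq!(threshold.critical, Duration::from_millis(100));
    // An unset variable yields None; a malformed value, or warn > critical, panics.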
- let panic_on_incorrect_value = || { - panic!( - "Duration variable {} expected to have 2 numbers separated by comma, but got {}", - env_var_name, durations_str - ); - }; - - let (warn, critical) = ( - durations.next().unwrap_or_else(panic_on_incorrect_value), - durations.next().unwrap_or_else(panic_on_incorrect_value) - ); - - if warn > critical { - panic!("Test execution warn time should be less or equal to the critical time"); - } - - Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical))) - } -} - -/// Structure with parameters for calculating test execution time. -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] -pub struct TestTimeOptions { - /// Denotes if the test critical execution time limit excess should be considered - /// a test failure. - pub error_on_excess: bool, - pub colored: bool, - pub unit_threshold: TimeThreshold, - pub integration_threshold: TimeThreshold, - pub doctest_threshold: TimeThreshold, -} - -impl TestTimeOptions { - pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self { - let unit_threshold = - TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME) - .unwrap_or_else(Self::default_unit); - - let integration_threshold = - TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME) - .unwrap_or_else(Self::default_integration); - - let doctest_threshold = - TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME) - .unwrap_or_else(Self::default_doctest); - - Self { - error_on_excess, - colored, - unit_threshold, - integration_threshold, - doctest_threshold, - } - } - - pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { - exec_time.0 >= self.warn_time(test) - } - - pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { - exec_time.0 >= self.critical_time(test) - } - - fn warn_time(&self, test: &TestDesc) -> Duration { - match test.test_type { - TestType::UnitTest => self.unit_threshold.warn, - TestType::IntegrationTest => self.integration_threshold.warn, - TestType::DocTest => self.doctest_threshold.warn, - TestType::Unknown => time_constants::UNKNOWN_WARN, - } - } - - fn critical_time(&self, test: &TestDesc) -> Duration { - match test.test_type { - TestType::UnitTest => self.unit_threshold.critical, - TestType::IntegrationTest => self.integration_threshold.critical, - TestType::DocTest => self.doctest_threshold.critical, - TestType::Unknown => time_constants::UNKNOWN_CRITICAL, - } - } - - fn default_unit() -> TimeThreshold { - TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL) - } - - fn default_integration() -> TimeThreshold { - TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL) - } - - fn default_doctest() -> TimeThreshold { - TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL) - } -} - -#[derive(Debug)] -pub struct TestOpts { - pub list: bool, - pub filter: Option, - pub filter_exact: bool, - pub exclude_should_panic: bool, - pub run_ignored: RunIgnored, - pub run_tests: bool, - pub bench_benchmarks: bool, - pub logfile: Option, - pub nocapture: bool, - pub color: ColorConfig, - pub format: OutputFormat, - pub test_threads: Option, - pub skip: Vec, - pub time_options: Option, - pub options: Options, -} - -/// Result of parsing the options. -pub type OptRes = Result; -/// Result of parsing the option part. 
-type OptPartRes = Result, String>; - -fn optgroups() -> getopts::Options { - let mut opts = getopts::Options::new(); - opts.optflag("", "include-ignored", "Run ignored and not ignored tests") - .optflag("", "ignored", "Run only ignored tests") - .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic") - .optflag("", "test", "Run tests and not benchmarks") - .optflag("", "bench", "Run benchmarks instead of tests") - .optflag("", "list", "List all tests and benchmarks") - .optflag("h", "help", "Display this message (longer with --help)") - .optopt( - "", - "logfile", - "Write logs to the specified file instead \ - of stdout", - "PATH", - ) - .optflag( - "", - "nocapture", - "don't capture stdout/stderr of each \ - task, allow printing directly", - ) - .optopt( - "", - "test-threads", - "Number of threads used for running tests \ - in parallel", - "n_threads", - ) - .optmulti( - "", - "skip", - "Skip tests whose names contain FILTER (this flag can \ - be used multiple times)", - "FILTER", - ) - .optflag( - "q", - "quiet", - "Display one character per test instead of one line. \ - Alias to --format=terse", - ) - .optflag( - "", - "exact", - "Exactly match filters rather than by substring", - ) - .optopt( - "", - "color", - "Configure coloring of output: - auto = colorize if stdout is a tty and tests are run on serially (default); - always = always colorize output; - never = never colorize output;", - "auto|always|never", - ) - .optopt( - "", - "format", - "Configure formatting of output: - pretty = Print verbose output; - terse = Display one character per test; - json = Output a json document", - "pretty|terse|json", - ) - .optflag( - "", - "show-output", - "Show captured stdout of successful tests" - ) - .optopt( - "Z", - "", - "Enable nightly-only flags: - unstable-options = Allow use of experimental features", - "unstable-options", - ) - .optflagopt( - "", - "report-time", - "Show execution time of each test. Awailable values: - plain = do not colorize the execution time (default); - colored = colorize output according to the `color` parameter value; - - Threshold values for colorized output can be configured via - `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and - `RUST_TEST_TIME_DOCTEST` environment variables. - - Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. - - Not available for --format=terse", - "plain|colored" - ) - .optflag( - "", - "ensure-time", - "Treat excess of the test execution time limit as error. - - Threshold values for this option can be configured via - `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and - `RUST_TEST_TIME_DOCTEST` environment variables. - - Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. - - `CRITICAL_TIME` here means the limit that should not be exceeded by test. - " - ); - return opts; -} - -fn usage(binary: &str, options: &getopts::Options) { - let message = format!("Usage: {} [OPTIONS] [FILTER]", binary); - println!( - r#"{usage} - -The FILTER string is tested against the name of all tests, and only those -tests whose names contain the filter are run. - -By default, all tests are run in parallel. This can be altered with the ---test-threads flag or the RUST_TEST_THREADS environment variable when running -tests (set it to 1). - -All tests have their standard output and standard error captured by default. -This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE -environment variable to a value other than "0". 
Logging is not captured by default. - -Test Attributes: - - `#[test]` - Indicates a function is a test to be run. This function - takes no arguments. - `#[bench]` - Indicates a function is a benchmark to be run. This - function takes one argument (test::Bencher). - `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if - the code causes a panic (an assertion failure or panic!) - A message may be provided, which the failure string must - contain: #[should_panic(expected = "foo")]. - `#[ignore]` - When applied to a function which is already attributed as a - test, then the test runner will ignore these tests during - normal test runs. Running with --ignored or --include-ignored will run - these tests."#, - usage = options.usage(&message) - ); -} - -// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566 -fn is_nightly() -> bool { - // Whether this is a feature-staged build, i.e., on the beta or stable channel - let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); - // Whether we should enable unstable features for bootstrapping - let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); - - bootstrap || !disable_unstable_features -} - -// Gets the option value and checks if unstable features are enabled. -macro_rules! unstable_optflag { - ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{ - let opt = $matches.opt_present($option_name); - if !$allow_unstable && opt { - return Some(Err(format!( - "The \"{}\" flag is only accepted on the nightly compiler", - $option_name - ))); - } - - opt - }}; -} - -// Gets the CLI options assotiated with `report-time` feature. -fn get_time_options( - matches: &getopts::Matches, - allow_unstable: bool) --> Option> { - let report_time = unstable_optflag!(matches, allow_unstable, "report-time"); - let colored_opt_str = matches.opt_str("report-time"); - let mut report_time_colored = report_time && colored_opt_str == Some("colored".into()); - let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time"); - - // If `ensure-test-time` option is provided, time output is enforced, - // so user won't be confused if any of tests will silently fail. 
- let options = if report_time || ensure_test_time { - if ensure_test_time && !report_time { - report_time_colored = true; - } - Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored)) - } else { - None - }; - - Some(Ok(options)) -} - -// Parses command line arguments into test options -pub fn parse_opts(args: &[String]) -> Option { - let mut allow_unstable = false; - let opts = optgroups(); - let args = args.get(1..).unwrap_or(args); - let matches = match opts.parse(args) { - Ok(m) => m, - Err(f) => return Some(Err(f.to_string())), - }; - - if let Some(opt) = matches.opt_str("Z") { - if !is_nightly() { - return Some(Err( - "the option `Z` is only accepted on the nightly compiler".into(), - )); - } - - match &*opt { - "unstable-options" => { - allow_unstable = true; - } - _ => { - return Some(Err("Unrecognized option to `Z`".into())); - } - } - }; - - if matches.opt_present("h") { - usage(&args[0], &opts); - return None; - } - - let filter = if !matches.free.is_empty() { - Some(matches.free[0].clone()) - } else { - None - }; - - let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic"); - - let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored"); - - let run_ignored = match (include_ignored, matches.opt_present("ignored")) { - (true, true) => { - return Some(Err( - "the options --include-ignored and --ignored are mutually exclusive".into(), - )); - } - (true, false) => RunIgnored::Yes, - (false, true) => RunIgnored::Only, - (false, false) => RunIgnored::No, - }; - let quiet = matches.opt_present("quiet"); - let exact = matches.opt_present("exact"); - let list = matches.opt_present("list"); - - let logfile = matches.opt_str("logfile"); - let logfile = logfile.map(|s| PathBuf::from(&s)); - - let bench_benchmarks = matches.opt_present("bench"); - let run_tests = !bench_benchmarks || matches.opt_present("test"); - - let mut nocapture = matches.opt_present("nocapture"); - if !nocapture { - nocapture = match env::var("RUST_TEST_NOCAPTURE") { - Ok(val) => &val != "0", - Err(_) => false, - }; - } - - let time_options = match get_time_options(&matches, allow_unstable) { - Some(Ok(val)) => val, - Some(Err(e)) => return Some(Err(e)), - None => panic!("Unexpected output from `get_time_options`"), - }; - - let test_threads = match matches.opt_str("test-threads") { - Some(n_str) => match n_str.parse::() { - Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())), - Ok(n) => Some(n), - Err(e) => { - return Some(Err(format!( - "argument for --test-threads must be a number > 0 \ - (error: {})", - e - ))); - } - }, - None => None, - }; - - let color = match matches.opt_str("color").as_ref().map(|s| &**s) { - Some("auto") | None => AutoColor, - Some("always") => AlwaysColor, - Some("never") => NeverColor, - - Some(v) => { - return Some(Err(format!( - "argument for --color must be auto, always, or never (was \ - {})", - v - ))); - } - }; - - let format = match matches.opt_str("format").as_ref().map(|s| &**s) { - None if quiet => OutputFormat::Terse, - Some("pretty") | None => OutputFormat::Pretty, - Some("terse") => OutputFormat::Terse, - Some("json") => { - if !allow_unstable { - return Some(Err( - "The \"json\" format is only accepted on the nightly compiler".into(), - )); - } - OutputFormat::Json - } - - Some(v) => { - return Some(Err(format!( - "argument for --format must be pretty, terse, or json (was \ - {})", - v - ))); - } - }; - - let test_opts = TestOpts { - list, - filter, - filter_exact: 
exact, - exclude_should_panic, - run_ignored, - run_tests, - bench_benchmarks, - logfile, - nocapture, - color, - format, - test_threads, - skip: matches.opt_strs("skip"), - time_options, - options: Options::new().display_output(matches.opt_present("show-output")), - }; - - Some(Ok(test_opts)) -} - -#[derive(Debug, Clone, PartialEq)] -pub struct BenchSamples { - ns_iter_summ: stats::Summary, - mb_s: usize, -} - -#[derive(Debug, Clone, PartialEq)] -pub enum TestResult { - TrOk, - TrFailed, - TrFailedMsg(String), - TrIgnored, - TrAllowedFail, - TrBench(BenchSamples), - TrTimedFail, -} - -unsafe impl Send for TestResult {} - -/// The meassured execution time of a unit test. -#[derive(Clone, PartialEq)] -pub struct TestExecTime(Duration); - -impl fmt::Display for TestExecTime { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:.3}s", self.0.as_secs_f64()) - } -} - enum OutputLocation { Pretty(Box), Raw(T), @@ -1071,7 +281,7 @@ impl ConsoleTestState { pub fn write_log_result(&mut self,test: &TestDesc, result: &TestResult, - exec_time: Option<&TestExecTime>, + exec_time: Option<&time::TestExecTime>, ) -> io::Result<()> { self.write_log(|| format!( "{} {}", @@ -1097,52 +307,6 @@ impl ConsoleTestState { } } -// Format a number with thousands separators -fn fmt_thousands_sep(mut n: usize, sep: char) -> String { - use std::fmt::Write; - let mut output = String::new(); - let mut trailing = false; - for &pow in &[9, 6, 3, 0] { - let base = 10_usize.pow(pow); - if pow == 0 || trailing || n / base != 0 { - if !trailing { - output.write_fmt(format_args!("{}", n / base)).unwrap(); - } else { - output.write_fmt(format_args!("{:03}", n / base)).unwrap(); - } - if pow != 0 { - output.push(sep); - } - trailing = true; - } - n %= base; - } - - output -} - -pub fn fmt_bench_samples(bs: &BenchSamples) -> String { - use std::fmt::Write; - let mut output = String::new(); - - let median = bs.ns_iter_summ.median as usize; - let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; - - output - .write_fmt(format_args!( - "{:>11} ns/iter (+/- {})", - fmt_thousands_sep(median, ','), - fmt_thousands_sep(deviation, ',') - )) - .unwrap(); - if bs.mb_s != 0 { - output - .write_fmt(format_args!(" = {} MB/s", bs.mb_s)) - .unwrap(); - } - output -} - // List the tests to console, and optionally to logfile. Filters are honored. 
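As a small check on the bench-report formatting removed just above (and re-homed in bench.rs earlier in this patch), the thousands-separator helper behaves like this (illustrative asserts, not part of the patch):

    assert_eq!(fmt_thousands_sep(987, ','), "987");
    assert_eq!(fmt_thousands_sep(1_234_567, ','), "1,234,567");
    // fmt_bench_samples then renders a line such as:
    //     1,234,567 ns/iter (+/- 890) = 12 MB/s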
pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Result<()> { let mut output = match term::stdout() { @@ -1271,14 +435,14 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu let mut out: Box = match opts.format { OutputFormat::Pretty => Box::new(PrettyFormatter::new( output, - use_color(opts), + opts.use_color(), max_name_len, is_multithreaded, opts.time_options, )), OutputFormat::Terse => Box::new(TerseFormatter::new( output, - use_color(opts), + opts.use_color(), max_name_len, is_multithreaded, )), @@ -1299,55 +463,16 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu return out.write_run_finish(&st); } -fn use_color(opts: &TestOpts) -> bool { - match opts.color { - AutoColor => !opts.nocapture && stdout_isatty(), - AlwaysColor => true, - NeverColor => false, - } -} - -#[cfg(any( - target_os = "cloudabi", - all(target_arch = "wasm32", not(target_os = "emscripten")), - all(target_vendor = "fortanix", target_env = "sgx") -))] -fn stdout_isatty() -> bool { - // FIXME: Implement isatty on SGX - false -} -#[cfg(unix)] -fn stdout_isatty() -> bool { - unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 } -} -#[cfg(windows)] -fn stdout_isatty() -> bool { - type DWORD = u32; - type BOOL = i32; - type HANDLE = *mut u8; - type LPDWORD = *mut u32; - const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD; - extern "system" { - fn GetStdHandle(which: DWORD) -> HANDLE; - fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL; - } - unsafe { - let handle = GetStdHandle(STD_OUTPUT_HANDLE); - let mut out = 0; - GetConsoleMode(handle, &mut out) != 0 - } -} - #[derive(Clone)] pub enum TestEvent { TeFiltered(Vec), TeWait(TestDesc), - TeResult(TestDesc, TestResult, Option, Vec), + TeResult(TestDesc, TestResult, Option, Vec), TeTimeout(TestDesc), TeFilteredOut(usize), } -pub type MonitorMsg = (TestDesc, TestResult, Option, Vec); +pub type MonitorMsg = (TestDesc, TestResult, Option, Vec); struct Sink(Arc>>); impl Write for Sink { @@ -1359,18 +484,6 @@ impl Write for Sink { } } -#[derive(Clone, Copy)] -pub enum RunStrategy { - /// Runs the test in the current process, and sends the result back over the - /// supplied channel. - InProcess, - - /// Spawns a subprocess to run the test, and sends the result back over the - /// supplied channel. Requires `argv[0]` to exist and point to the binary - /// that's currently running. - SpawnPrimary, -} - pub fn run_tests(opts: &TestOpts, tests: Vec, mut callback: F) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()>, @@ -1467,7 +580,7 @@ where while pending > 0 || !remaining.is_empty() { while pending < concurrency && !remaining.is_empty() { let test = remaining.pop().unwrap(); - let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S); + let timeout = time::get_default_test_timeout(); running_tests.insert(test.desc.clone(), timeout); callback(TeWait(test.desc.clone()))?; //here no pad run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes); @@ -1510,153 +623,6 @@ where Ok(()) } -#[allow(deprecated)] -fn get_concurrency() -> usize { - return match env::var("RUST_TEST_THREADS") { - Ok(s) => { - let opt_n: Option = s.parse().ok(); - match opt_n { - Some(n) if n > 0 => n, - _ => panic!( - "RUST_TEST_THREADS is `{}`, should be a positive integer.", - s - ), - } - } - Err(..) 
=> num_cpus(), - }; - - #[cfg(windows)] - #[allow(nonstandard_style)] - fn num_cpus() -> usize { - #[repr(C)] - struct SYSTEM_INFO { - wProcessorArchitecture: u16, - wReserved: u16, - dwPageSize: u32, - lpMinimumApplicationAddress: *mut u8, - lpMaximumApplicationAddress: *mut u8, - dwActiveProcessorMask: *mut u8, - dwNumberOfProcessors: u32, - dwProcessorType: u32, - dwAllocationGranularity: u32, - wProcessorLevel: u16, - wProcessorRevision: u16, - } - extern "system" { - fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32; - } - unsafe { - let mut sysinfo = std::mem::zeroed(); - GetSystemInfo(&mut sysinfo); - sysinfo.dwNumberOfProcessors as usize - } - } - - #[cfg(target_os = "vxworks")] - fn num_cpus() -> usize { - // FIXME: Implement num_cpus on vxWorks - 1 - } - - #[cfg(target_os = "redox")] - fn num_cpus() -> usize { - // FIXME: Implement num_cpus on Redox - 1 - } - - #[cfg(any( - all(target_arch = "wasm32", not(target_os = "emscripten")), - all(target_vendor = "fortanix", target_env = "sgx") - ))] - fn num_cpus() -> usize { - 1 - } - - #[cfg(any( - target_os = "android", - target_os = "cloudabi", - target_os = "emscripten", - target_os = "fuchsia", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "solaris", - ))] - fn num_cpus() -> usize { - unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize } - } - - #[cfg(any( - target_os = "freebsd", - target_os = "dragonfly", - target_os = "netbsd" - ))] - fn num_cpus() -> usize { - use std::ptr; - - let mut cpus: libc::c_uint = 0; - let mut cpus_size = std::mem::size_of_val(&cpus); - - unsafe { - cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint; - } - if cpus < 1 { - let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; - unsafe { - libc::sysctl( - mib.as_mut_ptr(), - 2, - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0, - ); - } - if cpus < 1 { - cpus = 1; - } - } - cpus as usize - } - - #[cfg(target_os = "openbsd")] - fn num_cpus() -> usize { - use std::ptr; - - let mut cpus: libc::c_uint = 0; - let mut cpus_size = std::mem::size_of_val(&cpus); - let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0]; - - unsafe { - libc::sysctl( - mib.as_mut_ptr(), - 2, - &mut cpus as *mut _ as *mut _, - &mut cpus_size as *mut _ as *mut _, - ptr::null_mut(), - 0, - ); - } - if cpus < 1 { - cpus = 1; - } - cpus as usize - } - - #[cfg(target_os = "haiku")] - fn num_cpus() -> usize { - // FIXME: implement - 1 - } - - #[cfg(target_os = "l4re")] - fn num_cpus() -> usize { - // FIXME: implement - 1 - } -} - pub fn filter_tests(opts: &TestOpts, tests: Vec) -> Vec { let mut filtered = tests; let matches_filter = |test: &TestDescAndFn, filter: &str| { @@ -1748,7 +714,7 @@ pub fn run_test( pub strategy: RunStrategy, pub nocapture: bool, pub concurrency: Concurrent, - pub time: Option, + pub time: Option, } fn run_test_inner( @@ -1835,86 +801,13 @@ fn __rust_begin_short_backtrace(f: F) { f() } -fn calc_result<'a>( - desc: &TestDesc, - task_result: Result<(), &'a (dyn Any + 'static + Send)>, - time_opts: &Option, - exec_time: &Option -) -> TestResult { - let result = match (&desc.should_panic, task_result) { - (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk, - (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => { - if err - .downcast_ref::() - .map(|e| &**e) - .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e)) - .map(|e| e.contains(msg)) - .unwrap_or(false) - { - TrOk - } else { - if desc.allow_fail { - TrAllowedFail - } else { - TrFailedMsg(format!("panic 
did not include expected string '{}'", msg)) - } - } - } - (&ShouldPanic::Yes, Ok(())) => TrFailedMsg("test did not panic as expected".to_string()), - _ if desc.allow_fail => TrAllowedFail, - _ => TrFailed, - }; - - // If test is already failed (or allowed to fail), do not change the result. - if result != TrOk { - return result; - } - - // Check if test is failed due to timeout. - if let (Some(opts), Some(time)) = (time_opts, exec_time) { - if opts.error_on_excess && opts.is_critical(desc, time) { - return TrTimedFail; - } - } - - result -} - -fn get_result_from_exit_code( - desc: &TestDesc, - code: i32, - time_opts: &Option, - exec_time: &Option, -) -> TestResult { - let result = match (desc.allow_fail, code) { - (_, TR_OK) => TrOk, - (true, TR_FAILED) => TrAllowedFail, - (false, TR_FAILED) => TrFailed, - (_, _) => TrFailedMsg(format!("got unexpected return code {}", code)), - }; - - // If test is already failed (or allowed to fail), do not change the result. - if result != TrOk { - return result; - } - - // Check if test is failed due to timeout. - if let (Some(opts), Some(time)) = (time_opts, exec_time) { - if opts.error_on_excess && opts.is_critical(desc, time) { - return TrTimedFail; - } - } - - result -} - fn run_test_in_process( desc: TestDesc, nocapture: bool, report_time: bool, testfn: Box, monitor_ch: Sender, - time_opts: Option, + time_opts: Option, ) { // Buffer for capturing standard I/O let data = Arc::new(Mutex::new(Vec::new())); @@ -1936,7 +829,7 @@ fn run_test_in_process( let result = catch_unwind(AssertUnwindSafe(testfn)); let exec_time = start.map(|start| { let duration = start.elapsed(); - TestExecTime(duration) + time::TestExecTime(duration) }); if let Some((printio, panicio)) = oldio { @@ -1956,7 +849,7 @@ fn spawn_test_subprocess( desc: TestDesc, report_time: bool, monitor_ch: Sender, - time_opts: Option, + time_opts: Option, ) { let (result, test_output, exec_time) = (|| { let args = env::args().collect::>(); @@ -1978,7 +871,7 @@ fn spawn_test_subprocess( }; let exec_time = start.map(|start| { let duration = start.elapsed(); - TestExecTime(duration) + time::TestExecTime(duration) }); let std::process::Output { stdout, stderr, status } = output; @@ -2025,9 +918,9 @@ fn run_test_in_spawned_subprocess( } if let TrOk = test_result { - process::exit(TR_OK); + process::exit(test_result::TR_OK); } else { - process::exit(TR_FAILED); + process::exit(test_result::TR_FAILED); } }); let record_result2 = record_result.clone(); @@ -2053,229 +946,3 @@ fn get_exit_code(status: ExitStatus) -> Result { } } } - -#[derive(Clone, PartialEq)] -pub struct MetricMap(BTreeMap); - -impl MetricMap { - pub fn new() -> MetricMap { - MetricMap(BTreeMap::new()) - } - - /// Insert a named `value` (+/- `noise`) metric into the map. The value - /// must be non-negative. The `noise` indicates the uncertainty of the - /// metric, which doubles as the "noise range" of acceptable - /// pairwise-regressions on this named value, when comparing from one - /// metric to the next using `compare_to_old`. - /// - /// If `noise` is positive, then it means this metric is of a value - /// you want to see grow smaller, so a change larger than `noise` in the - /// positive direction represents a regression. - /// - /// If `noise` is negative, then it means this metric is of a value - /// you want to see grow larger, so a change larger than `noise` in the - /// negative direction represents a regression. 
- pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { - let m = Metric { value, noise }; - self.0.insert(name.to_owned(), m); - } - - pub fn fmt_metrics(&self) -> String { - let v = self - .0 - .iter() - .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise)) - .collect::>(); - v.join(", ") - } -} - -// Benchmarking - -pub use std::hint::black_box; - -impl Bencher { - /// Callback for benchmark functions to run in their body. - pub fn iter(&mut self, mut inner: F) - where - F: FnMut() -> T, - { - if self.mode == BenchMode::Single { - ns_iter_inner(&mut inner, 1); - return; - } - - self.summary = Some(iter(&mut inner)); - } - - pub fn bench(&mut self, mut f: F) -> Option - where - F: FnMut(&mut Bencher), - { - f(self); - return self.summary; - } -} - -fn ns_from_dur(dur: Duration) -> u64 { - dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64) -} - -fn ns_iter_inner(inner: &mut F, k: u64) -> u64 -where - F: FnMut() -> T, -{ - let start = Instant::now(); - for _ in 0..k { - black_box(inner()); - } - return ns_from_dur(start.elapsed()); -} - -pub fn iter(inner: &mut F) -> stats::Summary -where - F: FnMut() -> T, -{ - // Initial bench run to get ballpark figure. - let ns_single = ns_iter_inner(inner, 1); - - // Try to estimate iter count for 1ms falling back to 1m - // iterations if first run took < 1ns. - let ns_target_total = 1_000_000; // 1ms - let mut n = ns_target_total / cmp::max(1, ns_single); - - // if the first run took more than 1ms we don't want to just - // be left doing 0 iterations on every loop. The unfortunate - // side effect of not being able to do as many runs is - // automatically handled by the statistical analysis below - // (i.e., larger error bars). - n = cmp::max(1, n); - - let mut total_run = Duration::new(0, 0); - let samples: &mut [f64] = &mut [0.0_f64; 50]; - loop { - let loop_start = Instant::now(); - - for p in &mut *samples { - *p = ns_iter_inner(inner, n) as f64 / n as f64; - } - - stats::winsorize(samples, 5.0); - let summ = stats::Summary::new(samples); - - for p in &mut *samples { - let ns = ns_iter_inner(inner, 5 * n); - *p = ns as f64 / (5 * n) as f64; - } - - stats::winsorize(samples, 5.0); - let summ5 = stats::Summary::new(samples); - - let loop_run = loop_start.elapsed(); - - // If we've run for 100ms and seem to have converged to a - // stable median. - if loop_run > Duration::from_millis(100) - && summ.median_abs_dev_pct < 1.0 - && summ.median - summ5.median < summ5.median_abs_dev - { - return summ5; - } - - total_run = total_run + loop_run; - // Longest we ever run for is 3s. - if total_run > Duration::from_secs(3) { - return summ5; - } - - // If we overflow here just return the results so far. 
We check a - // multiplier of 10 because we're about to multiply by 2 and the - // next iteration of the loop will also multiply by 5 (to calculate - // the summ5 result) - n = match n.checked_mul(10) { - Some(_) => n * 2, - None => { - return summ5; - } - }; - } -} - -pub mod bench { - use super::{ - BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult - }; - use crate::stats; - use std::cmp; - use std::io; - use std::panic::{catch_unwind, AssertUnwindSafe}; - use std::sync::{Arc, Mutex}; - - pub fn benchmark(desc: TestDesc, monitor_ch: Sender, nocapture: bool, f: F) - where - F: FnMut(&mut Bencher), - { - let mut bs = Bencher { - mode: BenchMode::Auto, - summary: None, - bytes: 0, - }; - - let data = Arc::new(Mutex::new(Vec::new())); - let oldio = if !nocapture { - Some(( - io::set_print(Some(Box::new(Sink(data.clone())))), - io::set_panic(Some(Box::new(Sink(data.clone())))), - )) - } else { - None - }; - - let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f))); - - if let Some((printio, panicio)) = oldio { - io::set_print(printio); - io::set_panic(panicio); - } - - let test_result = match result { - //bs.bench(f) { - Ok(Some(ns_iter_summ)) => { - let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); - let mb_s = bs.bytes * 1000 / ns_iter; - - let bs = BenchSamples { - ns_iter_summ, - mb_s: mb_s as usize, - }; - TestResult::TrBench(bs) - } - Ok(None) => { - // iter not called, so no data. - // FIXME: error in this case? - let samples: &mut [f64] = &mut [0.0_f64; 1]; - let bs = BenchSamples { - ns_iter_summ: stats::Summary::new(samples), - mb_s: 0, - }; - TestResult::TrBench(bs) - } - Err(_) => TestResult::TrFailed, - }; - - let stdout = data.lock().unwrap().to_vec(); - monitor_ch.send((desc, test_result, None, stdout)).unwrap(); - } - - pub fn run_once(f: F) - where - F: FnMut(&mut Bencher), - { - let mut bs = Bencher { - mode: BenchMode::Single, - summary: None, - bytes: 0, - }; - bs.bench(f); - } -} diff --git a/src/libtest/options.rs b/src/libtest/options.rs new file mode 100644 index 0000000000000..0a604cae0ca33 --- /dev/null +++ b/src/libtest/options.rs @@ -0,0 +1,80 @@ +//! Enums denoting options for test execution. + +/// Whether to execute tests concurrently or not +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Concurrent { + Yes, + No, +} + +#[derive(Clone, PartialEq, Eq)] +pub enum BenchMode { + Auto, + Single, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum ShouldPanic { + No, + Yes, + YesWithMessage(&'static str), +} + +#[derive(Copy, Clone, Debug)] +pub enum ColorConfig { + AutoColor, + AlwaysColor, + NeverColor, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum OutputFormat { + Pretty, + Terse, + Json, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum RunIgnored { + Yes, + No, + Only, +} + +#[derive(Clone, Copy)] +pub enum RunStrategy { + /// Runs the test in the current process, and sends the result back over the + /// supplied channel. + InProcess, + + /// Spawns a subprocess to run the test, and sends the result back over the + /// supplied channel. Requires `argv[0]` to exist and point to the binary + /// that's currently running. + SpawnPrimary, +} + +/// In case we want to add other options as well, just add them in this struct. 
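+///
+/// For example, the CLI parsing code in this patch constructs it as
+/// `Options::new().display_output(matches.opt_present("show-output"))`;
+/// setting `display_output` to `true` makes the JSON formatter include the
+/// output of passing tests as well.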
+#[derive(Copy, Clone, Debug)] +pub struct Options { + pub display_output: bool, + pub panic_abort: bool, +} + +impl Options { + pub fn new() -> Options { + Options { + display_output: false, + panic_abort: false, + } + } + + pub fn display_output(mut self, display_output: bool) -> Options { + self.display_output = display_output; + self + } + + pub fn panic_abort(mut self, panic_abort: bool) -> Options { + self.panic_abort = panic_abort; + self + } +} diff --git a/src/libtest/test_result.rs b/src/libtest/test_result.rs new file mode 100644 index 0000000000000..4eb3f93e2a42b --- /dev/null +++ b/src/libtest/test_result.rs @@ -0,0 +1,102 @@ + +use std::any::Any; + +use super::bench::BenchSamples; +use super::time; +use super::types::TestDesc; +use super::options::ShouldPanic; + +pub use self::TestResult::*; + +// Return codes for secondary process. +// Start somewhere other than 0 so we know the return code means what we think +// it means. +pub const TR_OK: i32 = 50; +pub const TR_FAILED: i32 = 51; + +#[derive(Debug, Clone, PartialEq)] +pub enum TestResult { + TrOk, + TrFailed, + TrFailedMsg(String), + TrIgnored, + TrAllowedFail, + TrBench(BenchSamples), + TrTimedFail, +} + +unsafe impl Send for TestResult {} + + +pub fn calc_result<'a>( + desc: &TestDesc, + task_result: Result<(), &'a (dyn Any + 'static + Send)>, + time_opts: &Option, + exec_time: &Option +) -> TestResult { + let result = match (&desc.should_panic, task_result) { + (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk, + (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => { + if err + .downcast_ref::() + .map(|e| &**e) + .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e)) + .map(|e| e.contains(msg)) + .unwrap_or(false) + { + TestResult::TrOk + } else { + if desc.allow_fail { + TestResult::TrAllowedFail + } else { + TestResult::TrFailedMsg(format!("panic did not include expected string '{}'", msg)) + } + } + } + (&ShouldPanic::Yes, Ok(())) => TestResult::TrFailedMsg("test did not panic as expected".to_string()), + _ if desc.allow_fail => TestResult::TrAllowedFail, + _ => TestResult::TrFailed, + }; + + // If test is already failed (or allowed to fail), do not change the result. + if result != TestResult::TrOk { + return result; + } + + // Check if test is failed due to timeout. + if let (Some(opts), Some(time)) = (time_opts, exec_time) { + if opts.error_on_excess && opts.is_critical(desc, time) { + return TestResult::TrTimedFail; + } + } + + result +} + +pub fn get_result_from_exit_code( + desc: &TestDesc, + code: i32, + time_opts: &Option, + exec_time: &Option, +) -> TestResult { + let result = match (desc.allow_fail, code) { + (_, TR_OK) => TestResult::TrOk, + (true, TR_FAILED) => TestResult::TrAllowedFail, + (false, TR_FAILED) => TestResult::TrFailed, + (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)), + }; + + // If test is already failed (or allowed to fail), do not change the result. + if result != TestResult::TrOk { + return result; + } + + // Check if test is failed due to timeout. + if let (Some(opts), Some(time)) = (time_opts, exec_time) { + if opts.error_on_excess && opts.is_critical(desc, time) { + return TestResult::TrTimedFail; + } + } + + result +} diff --git a/src/libtest/time.rs b/src/libtest/time.rs new file mode 100644 index 0000000000000..b7ce764505bfe --- /dev/null +++ b/src/libtest/time.rs @@ -0,0 +1,206 @@ +//! Module `time` contains everything related to the time measurement of unit tests +//! execution. +//! 
Two main purposes of this module:
+//! - Check whether test is timed out.
+//! - Provide helpers for `report-time` and `measure-time` options.
+
+use std::time::{Duration, Instant};
+use std::str::FromStr;
+use std::fmt;
+use std::env;
+
+use super::types::{TestDesc, TestType};
+
+pub const TEST_WARN_TIMEOUT_S: u64 = 60;
+
+/// This small module contains constants used by the `report-time` option.
+/// These constant values are used if the corresponding environment variables are not set.
+///
+/// To override the values for unit tests, set the environment variable `RUST_TEST_TIME_UNIT`;
+/// to override the values for integration tests, set `RUST_TEST_TIME_INTEGRATION`;
+/// to override the values for doctests, set `RUST_TEST_TIME_DOCTEST`.
+///
+/// The expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 is the
+/// warn time and 200 is the critical time, both in milliseconds.
+pub mod time_constants {
+    use std::time::Duration;
+    use super::TEST_WARN_TIMEOUT_S;
+
+    /// Environment variable for overriding the default threshold for unit tests.
+    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
+
+    // Unit tests are supposed to be really quick.
+    pub const UNIT_WARN: Duration = Duration::from_millis(50);
+    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
+
+    /// Environment variable for overriding the default threshold for integration tests.
+    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
+
+    // Integration tests may do a lot of work, so they can take longer to execute.
+    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
+    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
+
+    /// Environment variable for overriding the default threshold for doctests.
+    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
+
+    // Doctests are similar to integration tests, because they can include a lot of
+    // initialization code.
+    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
+    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
+
+    // Do not assume anything about unknown tests; base the limits on the
+    // `TEST_WARN_TIMEOUT_S` constant.
+    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
+    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
+}
+
+/// Returns an `Instant` denoting when the test should be considered
+/// timed out.
+pub fn get_default_test_timeout() -> Instant {
+    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
+}
+
+/// The measured execution time of a unit test.
+#[derive(Clone, PartialEq)]
+pub struct TestExecTime(pub Duration);
+
+impl fmt::Display for TestExecTime {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:.3}s", self.0.as_secs_f64())
+    }
+}
+
+/// Structure denoting time limits for test execution.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TimeThreshold {
+    pub warn: Duration,
+    pub critical: Duration,
+}
+
+impl TimeThreshold {
+    /// Creates a new `TimeThreshold` instance with the provided durations.
+    pub fn new(warn: Duration, critical: Duration) -> Self {
+        Self {
+            warn,
+            critical,
+        }
+    }
+
+    /// Attempts to create a `TimeThreshold` instance with values obtained
+    /// from the environment variable, and returns `None` if the variable
+    /// is not set.
+    /// The environment variable format is expected to match `\d+,\d+`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the variable with the provided name is set but contains an
+    /// inappropriate value.
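+    ///
+    /// For example, with `RUST_TEST_TIME_UNIT=50,100` in the environment,
+    /// `TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)` returns
+    /// `Some(TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100)))`.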
+ pub fn from_env_var(env_var_name: &str) -> Option { + let durations_str = env::var(env_var_name).ok()?; + + // Split string into 2 substrings by comma and try to parse numbers. + let mut durations = durations_str + .splitn(2, ',') + .map(|v| { + u64::from_str(v).unwrap_or_else(|_| { + panic!( + "Duration value in variable {} is expected to be a number, but got {}", + env_var_name, v + ) + }) + }); + + // Callback to be called if the environment variable has unexpected structure. + let panic_on_incorrect_value = || { + panic!( + "Duration variable {} expected to have 2 numbers separated by comma, but got {}", + env_var_name, durations_str + ); + }; + + let (warn, critical) = ( + durations.next().unwrap_or_else(panic_on_incorrect_value), + durations.next().unwrap_or_else(panic_on_incorrect_value) + ); + + if warn > critical { + panic!("Test execution warn time should be less or equal to the critical time"); + } + + Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical))) + } +} + +/// Structure with parameters for calculating test execution time. +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] +pub struct TestTimeOptions { + /// Denotes if the test critical execution time limit excess should be considered + /// a test failure. + pub error_on_excess: bool, + pub colored: bool, + pub unit_threshold: TimeThreshold, + pub integration_threshold: TimeThreshold, + pub doctest_threshold: TimeThreshold, +} + +impl TestTimeOptions { + pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self { + let unit_threshold = + TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME) + .unwrap_or_else(Self::default_unit); + + let integration_threshold = + TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME) + .unwrap_or_else(Self::default_integration); + + let doctest_threshold = + TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME) + .unwrap_or_else(Self::default_doctest); + + Self { + error_on_excess, + colored, + unit_threshold, + integration_threshold, + doctest_threshold, + } + } + + pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { + exec_time.0 >= self.warn_time(test) + } + + pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { + exec_time.0 >= self.critical_time(test) + } + + fn warn_time(&self, test: &TestDesc) -> Duration { + match test.test_type { + TestType::UnitTest => self.unit_threshold.warn, + TestType::IntegrationTest => self.integration_threshold.warn, + TestType::DocTest => self.doctest_threshold.warn, + TestType::Unknown => time_constants::UNKNOWN_WARN, + } + } + + fn critical_time(&self, test: &TestDesc) -> Duration { + match test.test_type { + TestType::UnitTest => self.unit_threshold.critical, + TestType::IntegrationTest => self.integration_threshold.critical, + TestType::DocTest => self.doctest_threshold.critical, + TestType::Unknown => time_constants::UNKNOWN_CRITICAL, + } + } + + fn default_unit() -> TimeThreshold { + TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL) + } + + fn default_integration() -> TimeThreshold { + TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL) + } + + fn default_doctest() -> TimeThreshold { + TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL) + } +} diff --git a/src/libtest/types.rs b/src/libtest/types.rs new file mode 100644 index 0000000000000..89bcf2cf2853b --- /dev/null +++ b/src/libtest/types.rs @@ -0,0 +1,145 @@ +//! 
Common types used by `libtest`. + +use std::fmt; +use std::borrow::Cow; + +use super::options; +use super::bench::Bencher; + +pub use NamePadding::*; +pub use TestName::*; +pub use TestFn::*; + +/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html) +/// conventions. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum TestType { + /// Unit-tests are expected to be in the `src` folder of the crate. + UnitTest, + /// Integration-style tests are expected to be in the `tests` folder of the crate. + IntegrationTest, + /// Doctests are created by the `librustdoc` manually, so it's a different type of test. + DocTest, + /// Tests for the sources that don't follow the project layout convention + /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly). + Unknown, +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum NamePadding { + PadNone, + PadOnRight, +} + +// The name of a test. By convention this follows the rules for rust +// paths; i.e., it should be a series of identifiers separated by double +// colons. This way if some test runner wants to arrange the tests +// hierarchically it may. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub enum TestName { + StaticTestName(&'static str), + DynTestName(String), + AlignedTestName(Cow<'static, str>, NamePadding), +} + +impl TestName { + pub fn as_slice(&self) -> &str { + match *self { + StaticTestName(s) => s, + DynTestName(ref s) => s, + AlignedTestName(ref s, _) => &*s, + } + } + + pub fn padding(&self) -> NamePadding { + match self { + &AlignedTestName(_, p) => p, + _ => PadNone, + } + } + + pub fn with_padding(&self, padding: NamePadding) -> TestName { + let name = match self { + &TestName::StaticTestName(name) => Cow::Borrowed(name), + &TestName::DynTestName(ref name) => Cow::Owned(name.clone()), + &TestName::AlignedTestName(ref name, _) => name.clone(), + }; + + TestName::AlignedTestName(name, padding) + } +} +impl fmt::Display for TestName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self.as_slice(), f) + } +} + +/// Represents a benchmark function. +pub trait TDynBenchFn: Send { + fn run(&self, harness: &mut Bencher); +} + +// A function that runs a test. If the function returns successfully, +// the test succeeds; if the function panics then the test fails. We +// may need to come up with a more clever definition of test in order +// to support isolation of tests into threads. +pub enum TestFn { + StaticTestFn(fn()), + StaticBenchFn(fn(&mut Bencher)), + DynTestFn(Box), + DynBenchFn(Box), +} + +impl TestFn { + pub fn padding(&self) -> NamePadding { + match *self { + StaticTestFn(..) => PadNone, + StaticBenchFn(..) => PadOnRight, + DynTestFn(..) => PadNone, + DynBenchFn(..) => PadOnRight, + } + } +} + +impl fmt::Debug for TestFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match *self { + StaticTestFn(..) => "StaticTestFn(..)", + StaticBenchFn(..) => "StaticBenchFn(..)", + DynTestFn(..) => "DynTestFn(..)", + DynBenchFn(..) => "DynBenchFn(..)", + }) + } +} + +// The definition of a single test. A test runner will run a list of +// these. 
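+//
+// For example, a `#[test]` function `parses_input` inside `mod tests` would be
+// described roughly as `TestDesc { name: StaticTestName("tests::parses_input"),
+// ignore: false, should_panic: ShouldPanic::No, allow_fail: false,
+// test_type: TestType::UnitTest }` (an illustrative sketch, not generated code).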
+#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct TestDesc { + pub name: TestName, + pub ignore: bool, + pub should_panic: options::ShouldPanic, + pub allow_fail: bool, + pub test_type: TestType, +} + +impl TestDesc { + pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String { + let mut name = String::from(self.name.as_slice()); + let fill = column_count.saturating_sub(name.len()); + let pad = " ".repeat(fill); + match align { + PadNone => name, + PadOnRight => { + name.push_str(&pad); + name + } + } + } +} + +#[derive(Debug)] +pub struct TestDescAndFn { + pub desc: TestDesc, + pub testfn: TestFn, +} From c951882c7364c672fe127f61d25b458e810572ff Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 10:12:02 +0300 Subject: [PATCH 2/8] Extract ConsoleTestState --- src/libtest/console.rs | 288 +++++++++++++++++++++++++++++++ src/libtest/event.rs | 15 ++ src/libtest/formatters/json.rs | 1 + src/libtest/formatters/mod.rs | 1 + src/libtest/formatters/pretty.rs | 5 +- src/libtest/formatters/terse.rs | 5 +- src/libtest/lib.rs | 288 +------------------------------ 7 files changed, 317 insertions(+), 286 deletions(-) create mode 100644 src/libtest/console.rs create mode 100644 src/libtest/event.rs diff --git a/src/libtest/console.rs b/src/libtest/console.rs new file mode 100644 index 0000000000000..851c0389ff377 --- /dev/null +++ b/src/libtest/console.rs @@ -0,0 +1,288 @@ +//! Module providing interface for running tests in the console. + +use std::fs::File; +use std::io::prelude::*; +use std::io; + +use term; + +use super::{ + helpers::{ + concurrency::get_concurrency, + metrics::MetricMap, + }, + types::{TestDesc, TestDescAndFn, NamePadding}, + options::{Options, OutputFormat}, + bench::fmt_bench_samples, + test_result::TestResult, + time::TestExecTime, + cli::TestOpts, + event::TestEvent, + run_tests, + filter_tests, +}; + +pub enum OutputLocation { + Pretty(Box), + Raw(T), +} + +impl Write for OutputLocation { + fn write(&mut self, buf: &[u8]) -> io::Result { + match *self { + OutputLocation::Pretty(ref mut term) => term.write(buf), + OutputLocation::Raw(ref mut stdout) => stdout.write(buf), + } + } + + fn flush(&mut self) -> io::Result<()> { + match *self { + OutputLocation::Pretty(ref mut term) => term.flush(), + OutputLocation::Raw(ref mut stdout) => stdout.flush(), + } + } +} + +use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}; + +pub struct ConsoleTestState { + pub log_out: Option, + pub total: usize, + pub passed: usize, + pub failed: usize, + pub ignored: usize, + pub allowed_fail: usize, + pub filtered_out: usize, + pub measured: usize, + pub metrics: MetricMap, + pub failures: Vec<(TestDesc, Vec)>, + pub not_failures: Vec<(TestDesc, Vec)>, + pub time_failures: Vec<(TestDesc, Vec)>, + pub options: Options, +} + +impl ConsoleTestState { + pub fn new(opts: &TestOpts) -> io::Result { + let log_out = match opts.logfile { + Some(ref path) => Some(File::create(path)?), + None => None, + }; + + Ok(ConsoleTestState { + log_out, + total: 0, + passed: 0, + failed: 0, + ignored: 0, + allowed_fail: 0, + filtered_out: 0, + measured: 0, + metrics: MetricMap::new(), + failures: Vec::new(), + not_failures: Vec::new(), + time_failures: Vec::new(), + options: opts.options, + }) + } + + pub fn write_log( + &mut self, + msg: F, + ) -> io::Result<()> + where + S: AsRef, + F: FnOnce() -> S, + { + match self.log_out { + None => Ok(()), + Some(ref mut o) => { + let msg = msg(); + let msg = msg.as_ref(); + 
o.write_all(msg.as_bytes()) + }, + } + } + + pub fn write_log_result(&mut self,test: &TestDesc, + result: &TestResult, + exec_time: Option<&TestExecTime>, + ) -> io::Result<()> { + self.write_log(|| format!( + "{} {}", + match *result { + TestResult::TrOk => "ok".to_owned(), + TestResult::TrFailed => "failed".to_owned(), + TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg), + TestResult::TrIgnored => "ignored".to_owned(), + TestResult::TrAllowedFail => "failed (allowed)".to_owned(), + TestResult::TrBench(ref bs) => fmt_bench_samples(bs), + TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(), + }, + test.name, + ))?; + if let Some(exec_time) = exec_time { + self.write_log(|| format!(" <{}>", exec_time))?; + } + self.write_log(|| "\n") + } + + fn current_test_count(&self) -> usize { + self.passed + self.failed + self.ignored + self.measured + self.allowed_fail + } +} + +// List the tests to console, and optionally to logfile. Filters are honored. +pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Result<()> { + let mut output = match term::stdout() { + None => OutputLocation::Raw(io::stdout()), + Some(t) => OutputLocation::Pretty(t), + }; + + let quiet = opts.format == OutputFormat::Terse; + let mut st = ConsoleTestState::new(opts)?; + + let mut ntest = 0; + let mut nbench = 0; + + for test in filter_tests(&opts, tests) { + use crate::TestFn::*; + + let TestDescAndFn { + desc: TestDesc { name, .. }, + testfn, + } = test; + + let fntype = match testfn { + StaticTestFn(..) | DynTestFn(..) => { + ntest += 1; + "test" + } + StaticBenchFn(..) | DynBenchFn(..) => { + nbench += 1; + "benchmark" + } + }; + + writeln!(output, "{}: {}", name, fntype)?; + st.write_log(|| format!("{} {}\n", fntype, name))?; + } + + fn plural(count: u32, s: &str) -> String { + match count { + 1 => format!("{} {}", 1, s), + n => format!("{} {}s", n, s), + } + } + + if !quiet { + if ntest != 0 || nbench != 0 { + writeln!(output, "")?; + } + + writeln!( + output, + "{}, {}", + plural(ntest, "test"), + plural(nbench, "benchmark") + )?; + } + + Ok(()) +} + +// A simple console test runner +pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { + fn callback( + event: &TestEvent, + st: &mut ConsoleTestState, + out: &mut dyn OutputFormatter, + ) -> io::Result<()> { + match (*event).clone() { + TestEvent::TeFiltered(ref filtered_tests) => { + st.total = filtered_tests.len(); + out.write_run_start(filtered_tests.len()) + } + TestEvent::TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out), + TestEvent::TeWait(ref test) => out.write_test_start(test), + TestEvent::TeTimeout(ref test) => out.write_timeout(test), + TestEvent::TeResult(test, result, exec_time, stdout) => { + st.write_log_result(&test, &result, exec_time.as_ref())?; + out.write_result(&test, &result, exec_time.as_ref(), &*stdout, &st)?; + match result { + TestResult::TrOk => { + st.passed += 1; + st.not_failures.push((test, stdout)); + } + TestResult::TrIgnored => st.ignored += 1, + TestResult::TrAllowedFail => st.allowed_fail += 1, + TestResult::TrBench(bs) => { + st.metrics.insert_metric( + test.name.as_slice(), + bs.ns_iter_summ.median, + bs.ns_iter_summ.max - bs.ns_iter_summ.min, + ); + st.measured += 1 + } + TestResult::TrFailed => { + st.failed += 1; + st.failures.push((test, stdout)); + } + TestResult::TrFailedMsg(msg) => { + st.failed += 1; + let mut stdout = stdout; + stdout.extend_from_slice(format!("note: {}", msg).as_bytes()); + st.failures.push((test, stdout)); + } + 
TestResult::TrTimedFail => { + st.failed += 1; + st.time_failures.push((test, stdout)); + } + } + Ok(()) + } + } + } + + let output = match term::stdout() { + None => OutputLocation::Raw(io::stdout()), + Some(t) => OutputLocation::Pretty(t), + }; + + let max_name_len = tests + .iter() + .max_by_key(|t| len_if_padded(*t)) + .map(|t| t.desc.name.as_slice().len()) + .unwrap_or(0); + + let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1; + + let mut out: Box = match opts.format { + OutputFormat::Pretty => Box::new(PrettyFormatter::new( + output, + opts.use_color(), + max_name_len, + is_multithreaded, + opts.time_options, + )), + OutputFormat::Terse => Box::new(TerseFormatter::new( + output, + opts.use_color(), + max_name_len, + is_multithreaded, + )), + OutputFormat::Json => Box::new(JsonFormatter::new(output)), + }; + let mut st = ConsoleTestState::new(opts)?; + fn len_if_padded(t: &TestDescAndFn) -> usize { + match t.testfn.padding() { + NamePadding::PadNone => 0, + NamePadding::PadOnRight => t.desc.name.as_slice().len(), + } + } + + run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?; + + assert!(st.current_test_count() == st.total); + + return out.write_run_finish(&st); +} diff --git a/src/libtest/event.rs b/src/libtest/event.rs new file mode 100644 index 0000000000000..b84551826c691 --- /dev/null +++ b/src/libtest/event.rs @@ -0,0 +1,15 @@ +//! Module containing different events that can occur +//! during tests execution process. + +use super::types::TestDesc; +use super::test_result::TestResult; +use super::time::TestExecTime; + +#[derive(Clone)] +pub enum TestEvent { + TeFiltered(Vec), + TeWait(TestDesc), + TeResult(TestDesc, TestResult, Option, Vec), + TeTimeout(TestDesc), + TeFilteredOut(usize), +} diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs index ff756c456dae6..41a293195cc81 100644 --- a/src/libtest/formatters/json.rs +++ b/src/libtest/formatters/json.rs @@ -1,4 +1,5 @@ use super::*; +use super::console::{ConsoleTestState, OutputLocation}; pub(crate) struct JsonFormatter { out: OutputLocation, diff --git a/src/libtest/formatters/mod.rs b/src/libtest/formatters/mod.rs index 72432cd8e3c2b..4f3ffdafe3fbf 100644 --- a/src/libtest/formatters/mod.rs +++ b/src/libtest/formatters/mod.rs @@ -1,4 +1,5 @@ use super::*; +use super::console::ConsoleTestState; mod pretty; mod json; diff --git a/src/libtest/formatters/pretty.rs b/src/libtest/formatters/pretty.rs index 84e1a44dab807..6f2c56bdf45e0 100644 --- a/src/libtest/formatters/pretty.rs +++ b/src/libtest/formatters/pretty.rs @@ -1,4 +1,5 @@ use super::*; +use super::console::{ConsoleTestState, OutputLocation}; pub(crate) struct PrettyFormatter { out: OutputLocation, @@ -67,7 +68,7 @@ impl PrettyFormatter { pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { match self.out { - Pretty(ref mut term) => { + OutputLocation::Pretty(ref mut term) => { if self.use_color { term.fg(color)?; } @@ -77,7 +78,7 @@ impl PrettyFormatter { } term.flush() } - Raw(ref mut stdout) => { + OutputLocation::Raw(ref mut stdout) => { stdout.write_all(word.as_bytes())?; stdout.flush() } diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs index 50407d1130f86..96203d5ea4279 100644 --- a/src/libtest/formatters/terse.rs +++ b/src/libtest/formatters/terse.rs @@ -1,4 +1,5 @@ use super::*; +use super::console::{ConsoleTestState, OutputLocation}; pub(crate) struct TerseFormatter { out: OutputLocation, @@ -68,7 +69,7 @@ impl TerseFormatter { pub fn 
write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { match self.out { - Pretty(ref mut term) => { + OutputLocation::Pretty(ref mut term) => { if self.use_color { term.fg(color)?; } @@ -78,7 +79,7 @@ impl TerseFormatter { } term.flush() } - Raw(ref mut stdout) => { + OutputLocation::Raw(ref mut stdout) => { stdout.write_all(word.as_bytes())?; stdout.flush() } diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index f79994671114f..6221140f60698 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -30,16 +30,12 @@ #![feature(termination_trait_lib)] #![feature(test)] -use term; - pub use self::ColorConfig::*; -use self::OutputLocation::*; -use self::TestEvent::*; +use self::event::TestEvent::*; pub use self::types::TestName::*; use std::borrow::Cow; use std::env; -use std::fs::File; use std::io; use std::io::prelude::*; use std::panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}; @@ -79,14 +75,16 @@ use test_result::*; use types::*; use options::*; use cli::*; +use event::*; use helpers::concurrency::get_concurrency; -use helpers::metrics::MetricMap; mod formatters; pub mod stats; mod cli; +mod console; +mod event; mod helpers; mod time; mod types; @@ -94,8 +92,6 @@ mod options; mod bench; mod test_result; -use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}; - // The default console test runner. It accepts the command line // arguments and a vector of test_descs. pub fn test_main(args: &[String], tests: Vec, options: Option) { @@ -111,12 +107,12 @@ pub fn test_main(args: &[String], tests: Vec, options: Option {} Ok(false) => process::exit(101), Err(e) => { @@ -200,278 +196,6 @@ pub fn assert_test_result(result: T) { ); } -enum OutputLocation { - Pretty(Box), - Raw(T), -} - -impl Write for OutputLocation { - fn write(&mut self, buf: &[u8]) -> io::Result { - match *self { - Pretty(ref mut term) => term.write(buf), - Raw(ref mut stdout) => stdout.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { - match *self { - Pretty(ref mut term) => term.flush(), - Raw(ref mut stdout) => stdout.flush(), - } - } -} - -struct ConsoleTestState { - log_out: Option, - total: usize, - passed: usize, - failed: usize, - ignored: usize, - allowed_fail: usize, - filtered_out: usize, - measured: usize, - metrics: MetricMap, - failures: Vec<(TestDesc, Vec)>, - not_failures: Vec<(TestDesc, Vec)>, - time_failures: Vec<(TestDesc, Vec)>, - options: Options, -} - -impl ConsoleTestState { - pub fn new(opts: &TestOpts) -> io::Result { - let log_out = match opts.logfile { - Some(ref path) => Some(File::create(path)?), - None => None, - }; - - Ok(ConsoleTestState { - log_out, - total: 0, - passed: 0, - failed: 0, - ignored: 0, - allowed_fail: 0, - filtered_out: 0, - measured: 0, - metrics: MetricMap::new(), - failures: Vec::new(), - not_failures: Vec::new(), - time_failures: Vec::new(), - options: opts.options, - }) - } - - pub fn write_log( - &mut self, - msg: F, - ) -> io::Result<()> - where - S: AsRef, - F: FnOnce() -> S, - { - match self.log_out { - None => Ok(()), - Some(ref mut o) => { - let msg = msg(); - let msg = msg.as_ref(); - o.write_all(msg.as_bytes()) - }, - } - } - - pub fn write_log_result(&mut self,test: &TestDesc, - result: &TestResult, - exec_time: Option<&time::TestExecTime>, - ) -> io::Result<()> { - self.write_log(|| format!( - "{} {}", - match *result { - TrOk => "ok".to_owned(), - TrFailed => "failed".to_owned(), - TrFailedMsg(ref msg) => format!("failed: {}", msg), - TrIgnored => "ignored".to_owned(), - 
TrAllowedFail => "failed (allowed)".to_owned(), - TrBench(ref bs) => fmt_bench_samples(bs), - TrTimedFail => "failed (time limit exceeded)".to_owned(), - }, - test.name, - ))?; - if let Some(exec_time) = exec_time { - self.write_log(|| format!(" <{}>", exec_time))?; - } - self.write_log(|| "\n") - } - - fn current_test_count(&self) -> usize { - self.passed + self.failed + self.ignored + self.measured + self.allowed_fail - } -} - -// List the tests to console, and optionally to logfile. Filters are honored. -pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Result<()> { - let mut output = match term::stdout() { - None => Raw(io::stdout()), - Some(t) => Pretty(t), - }; - - let quiet = opts.format == OutputFormat::Terse; - let mut st = ConsoleTestState::new(opts)?; - - let mut ntest = 0; - let mut nbench = 0; - - for test in filter_tests(&opts, tests) { - use crate::TestFn::*; - - let TestDescAndFn { - desc: TestDesc { name, .. }, - testfn, - } = test; - - let fntype = match testfn { - StaticTestFn(..) | DynTestFn(..) => { - ntest += 1; - "test" - } - StaticBenchFn(..) | DynBenchFn(..) => { - nbench += 1; - "benchmark" - } - }; - - writeln!(output, "{}: {}", name, fntype)?; - st.write_log(|| format!("{} {}\n", fntype, name))?; - } - - fn plural(count: u32, s: &str) -> String { - match count { - 1 => format!("{} {}", 1, s), - n => format!("{} {}s", n, s), - } - } - - if !quiet { - if ntest != 0 || nbench != 0 { - writeln!(output, "")?; - } - - writeln!( - output, - "{}, {}", - plural(ntest, "test"), - plural(nbench, "benchmark") - )?; - } - - Ok(()) -} - -// A simple console test runner -pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { - fn callback( - event: &TestEvent, - st: &mut ConsoleTestState, - out: &mut dyn OutputFormatter, - ) -> io::Result<()> { - match (*event).clone() { - TeFiltered(ref filtered_tests) => { - st.total = filtered_tests.len(); - out.write_run_start(filtered_tests.len()) - } - TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out), - TeWait(ref test) => out.write_test_start(test), - TeTimeout(ref test) => out.write_timeout(test), - TeResult(test, result, exec_time, stdout) => { - st.write_log_result(&test, &result, exec_time.as_ref())?; - out.write_result(&test, &result, exec_time.as_ref(), &*stdout, &st)?; - match result { - TrOk => { - st.passed += 1; - st.not_failures.push((test, stdout)); - } - TrIgnored => st.ignored += 1, - TrAllowedFail => st.allowed_fail += 1, - TrBench(bs) => { - st.metrics.insert_metric( - test.name.as_slice(), - bs.ns_iter_summ.median, - bs.ns_iter_summ.max - bs.ns_iter_summ.min, - ); - st.measured += 1 - } - TrFailed => { - st.failed += 1; - st.failures.push((test, stdout)); - } - TrFailedMsg(msg) => { - st.failed += 1; - let mut stdout = stdout; - stdout.extend_from_slice(format!("note: {}", msg).as_bytes()); - st.failures.push((test, stdout)); - } - TrTimedFail => { - st.failed += 1; - st.time_failures.push((test, stdout)); - } - } - Ok(()) - } - } - } - - let output = match term::stdout() { - None => Raw(io::stdout()), - Some(t) => Pretty(t), - }; - - let max_name_len = tests - .iter() - .max_by_key(|t| len_if_padded(*t)) - .map(|t| t.desc.name.as_slice().len()) - .unwrap_or(0); - - let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1; - - let mut out: Box = match opts.format { - OutputFormat::Pretty => Box::new(PrettyFormatter::new( - output, - opts.use_color(), - max_name_len, - is_multithreaded, - opts.time_options, - )), - OutputFormat::Terse => 
Box::new(TerseFormatter::new( - output, - opts.use_color(), - max_name_len, - is_multithreaded, - )), - OutputFormat::Json => Box::new(JsonFormatter::new(output)), - }; - let mut st = ConsoleTestState::new(opts)?; - fn len_if_padded(t: &TestDescAndFn) -> usize { - match t.testfn.padding() { - PadNone => 0, - PadOnRight => t.desc.name.as_slice().len(), - } - } - - run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?; - - assert!(st.current_test_count() == st.total); - - return out.write_run_finish(&st); -} - -#[derive(Clone)] -pub enum TestEvent { - TeFiltered(Vec), - TeWait(TestDesc), - TeResult(TestDesc, TestResult, Option, Vec), - TeTimeout(TestDesc), - TeFilteredOut(usize), -} - pub type MonitorMsg = (TestDesc, TestResult, Option, Vec); struct Sink(Arc>>); From 12397e9dd5a97460d76c884d449ca1c2d26da8ed Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 17:38:16 +0300 Subject: [PATCH 3/8] Make enum usage explicit and fix tests --- src/libtest/bench.rs | 16 ++- src/libtest/console.rs | 18 ++- src/libtest/event.rs | 25 ++++- src/libtest/formatters/json.rs | 31 ++++-- src/libtest/formatters/mod.rs | 13 ++- src/libtest/formatters/pretty.rs | 27 +++-- src/libtest/formatters/terse.rs | 31 ++++-- src/libtest/helpers/exit_code.rs | 20 ++++ src/libtest/helpers/mod.rs | 2 + src/libtest/helpers/sink.rs | 24 ++++ src/libtest/lib.rs | 184 +++++++++++++++---------------- src/libtest/stats/tests.rs | 2 +- src/libtest/tests.rs | 55 +++++---- src/libtest/time.rs | 2 +- 14 files changed, 289 insertions(+), 161 deletions(-) create mode 100644 src/libtest/helpers/exit_code.rs create mode 100644 src/libtest/helpers/sink.rs diff --git a/src/libtest/bench.rs b/src/libtest/bench.rs index 055a74f691cd4..bb5b0d1da5337 100644 --- a/src/libtest/bench.rs +++ b/src/libtest/bench.rs @@ -1,6 +1,11 @@ //! Benchmarking module. use super::{ - BenchMode, MonitorMsg, Sender, Sink, TestDesc, TestResult + event::CompletedTest, + helpers::sink::Sink, + options::BenchMode, + types::TestDesc, + test_result::TestResult, + Sender, }; use crate::stats; @@ -182,7 +187,7 @@ where } } -pub fn benchmark(desc: TestDesc, monitor_ch: Sender, nocapture: bool, f: F) +pub fn benchmark(desc: TestDesc, monitor_ch: Sender, nocapture: bool, f: F) where F: FnMut(&mut Bencher), { @@ -195,8 +200,8 @@ where let data = Arc::new(Mutex::new(Vec::new())); let oldio = if !nocapture { Some(( - io::set_print(Some(Box::new(Sink(data.clone())))), - io::set_panic(Some(Box::new(Sink(data.clone())))), + io::set_print(Some(Sink::new_boxed(&data))), + io::set_panic(Some(Sink::new_boxed(&data))), )) } else { None @@ -235,7 +240,8 @@ where }; let stdout = data.lock().unwrap().to_vec(); - monitor_ch.send((desc, test_result, None, stdout)).unwrap(); + let message = CompletedTest::new(desc, test_result, None, stdout); + monitor_ch.send(message).unwrap(); } pub fn run_once(f: F) diff --git a/src/libtest/console.rs b/src/libtest/console.rs index 851c0389ff377..2c14e9a1591c7 100644 --- a/src/libtest/console.rs +++ b/src/libtest/console.rs @@ -1,7 +1,7 @@ //! Module providing interface for running tests in the console. use std::fs::File; -use std::io::prelude::*; +use std::io::prelude::Write; use std::io; use term; @@ -192,7 +192,8 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Res // A simple console test runner pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { - fn callback( + // A callback handling events that occure during test execution. 
+ fn on_test_event( event: &TestEvent, st: &mut ConsoleTestState, out: &mut dyn OutputFormatter, @@ -205,9 +206,14 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu TestEvent::TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out), TestEvent::TeWait(ref test) => out.write_test_start(test), TestEvent::TeTimeout(ref test) => out.write_timeout(test), - TestEvent::TeResult(test, result, exec_time, stdout) => { - st.write_log_result(&test, &result, exec_time.as_ref())?; - out.write_result(&test, &result, exec_time.as_ref(), &*stdout, &st)?; + TestEvent::TeResult(completed_test) => { + let test = completed_test.desc; + let result = &completed_test.result; + let exec_time = &completed_test.exec_time; + let stdout = completed_test.stdout; + + st.write_log_result(&test, result, exec_time.as_ref())?; + out.write_result(&test, result, exec_time.as_ref(), &*stdout, st)?; match result { TestResult::TrOk => { st.passed += 1; @@ -280,7 +286,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu } } - run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?; + run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?; assert!(st.current_test_count() == st.total); diff --git a/src/libtest/event.rs b/src/libtest/event.rs index b84551826c691..e1b606149c505 100644 --- a/src/libtest/event.rs +++ b/src/libtest/event.rs @@ -5,11 +5,32 @@ use super::types::TestDesc; use super::test_result::TestResult; use super::time::TestExecTime; -#[derive(Clone)] +#[derive(Debug, Clone)] +pub struct CompletedTest { + pub desc: TestDesc, + pub result: TestResult, + pub exec_time: Option, + pub stdout: Vec, +} + +impl CompletedTest { + pub fn new(desc: TestDesc, result: TestResult, exec_time: Option, stdout: Vec) -> Self { + Self { + desc, + result, + exec_time, + stdout, + } + } +} + +unsafe impl Send for CompletedTest {} + +#[derive(Debug, Clone)] pub enum TestEvent { TeFiltered(Vec), TeWait(TestDesc), - TeResult(TestDesc, TestResult, Option, Vec), + TeResult(CompletedTest), TeTimeout(TestDesc), TeFilteredOut(usize), } diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs index 41a293195cc81..fc677036dabf2 100644 --- a/src/libtest/formatters/json.rs +++ b/src/libtest/formatters/json.rs @@ -1,5 +1,16 @@ -use super::*; -use super::console::{ConsoleTestState, OutputLocation}; +use std::{ + io, + io::prelude::Write, + borrow::Cow, +}; + +use crate::{ + types::TestDesc, + time, + test_result::TestResult, + console::{ConsoleTestState, OutputLocation}, +}; +use super::OutputFormatter; pub(crate) struct JsonFormatter { out: OutputLocation, @@ -81,21 +92,21 @@ impl OutputFormatter for JsonFormatter { stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()> { - let stdout = if (state.options.display_output || *result != TrOk) && stdout.len() > 0 { + let stdout = if (state.options.display_output || *result != TestResult::TrOk) && stdout.len() > 0 { Some(String::from_utf8_lossy(stdout)) } else { None }; match *result { - TrOk => { + TestResult::TrOk => { self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None) } - TrFailed => { + TestResult::TrFailed => { self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None) } - TrTimedFail => self.write_event( + TestResult::TrTimedFail => self.write_event( "test", desc.name.as_slice(), "failed", @@ -104,7 +115,7 @@ impl OutputFormatter for JsonFormatter { Some(r#""reason": "time limit exceeded""#), ), - TrFailedMsg(ref m) => self.write_event( + 
TestResult::TrFailedMsg(ref m) => self.write_event( "test", desc.name.as_slice(), "failed", @@ -113,11 +124,11 @@ impl OutputFormatter for JsonFormatter { Some(&*format!(r#""message": "{}""#, EscapedString(m))), ), - TrIgnored => { + TestResult::TrIgnored => { self.write_event("test", desc.name.as_slice(), "ignored", exec_time, stdout, None) } - TrAllowedFail => self.write_event( + TestResult::TrAllowedFail => self.write_event( "test", desc.name.as_slice(), "allowed_failure", @@ -126,7 +137,7 @@ impl OutputFormatter for JsonFormatter { None, ), - TrBench(ref bs) => { + TestResult::TrBench(ref bs) => { let median = bs.ns_iter_summ.median as usize; let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; diff --git a/src/libtest/formatters/mod.rs b/src/libtest/formatters/mod.rs index 4f3ffdafe3fbf..b6649a3effc7c 100644 --- a/src/libtest/formatters/mod.rs +++ b/src/libtest/formatters/mod.rs @@ -1,5 +1,14 @@ -use super::*; -use super::console::ConsoleTestState; +use std::{ + io, + io::prelude::Write, +}; + +use crate::{ + types::{TestDesc, TestName}, + time, + test_result::TestResult, + console::{ConsoleTestState}, +}; mod pretty; mod json; diff --git a/src/libtest/formatters/pretty.rs b/src/libtest/formatters/pretty.rs index 6f2c56bdf45e0..2fdbc63d51330 100644 --- a/src/libtest/formatters/pretty.rs +++ b/src/libtest/formatters/pretty.rs @@ -1,5 +1,16 @@ -use super::*; -use super::console::{ConsoleTestState, OutputLocation}; +use std::{ + io, + io::prelude::Write, +}; + +use crate::{ + types::TestDesc, + time, + test_result::TestResult, + console::{ConsoleTestState, OutputLocation}, + bench::fmt_bench_samples, +}; +use super::OutputFormatter; pub(crate) struct PrettyFormatter { out: OutputLocation, @@ -204,15 +215,15 @@ impl OutputFormatter for PrettyFormatter { } match *result { - TrOk => self.write_ok()?, - TrFailed | TrFailedMsg(_) => self.write_failed()?, - TrIgnored => self.write_ignored()?, - TrAllowedFail => self.write_allowed_fail()?, - TrBench(ref bs) => { + TestResult::TrOk => self.write_ok()?, + TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?, + TestResult::TrIgnored => self.write_ignored()?, + TestResult::TrAllowedFail => self.write_allowed_fail()?, + TestResult::TrBench(ref bs) => { self.write_bench()?; self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?; } - TrTimedFail => self.write_time_failed()?, + TestResult::TrTimedFail => self.write_time_failed()?, } self.write_time(desc, exec_time)?; diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs index 96203d5ea4279..90eb62251fb42 100644 --- a/src/libtest/formatters/terse.rs +++ b/src/libtest/formatters/terse.rs @@ -1,5 +1,20 @@ -use super::*; -use super::console::{ConsoleTestState, OutputLocation}; +use std::{ + io, + io::prelude::Write, +}; + +use crate::{ + types::TestDesc, + time, + test_result::TestResult, + types::NamePadding, + console::{ConsoleTestState, OutputLocation}, + bench::fmt_bench_samples, +}; +use super::OutputFormatter; + +// insert a '\n' after 100 tests in quiet mode +const QUIET_MODE_MAX_COLUMN: usize = 100; pub(crate) struct TerseFormatter { out: OutputLocation, @@ -164,7 +179,7 @@ impl OutputFormatter for TerseFormatter { // in order to indicate benchmarks. // When running benchmarks, terse-mode should still print their name as if // it is the Pretty formatter. 
- if !self.is_multithreaded && desc.name.padding() == PadOnRight { + if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight { self.write_test_name(desc)?; } @@ -180,11 +195,11 @@ impl OutputFormatter for TerseFormatter { _: &ConsoleTestState, ) -> io::Result<()> { match *result { - TrOk => self.write_ok(), - TrFailed | TrFailedMsg(_) | TrTimedFail => self.write_failed(), - TrIgnored => self.write_ignored(), - TrAllowedFail => self.write_allowed_fail(), - TrBench(ref bs) => { + TestResult::TrOk => self.write_ok(), + TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => self.write_failed(), + TestResult::TrIgnored => self.write_ignored(), + TestResult::TrAllowedFail => self.write_allowed_fail(), + TestResult::TrBench(ref bs) => { if self.is_multithreaded { self.write_test_name(desc)?; } diff --git a/src/libtest/helpers/exit_code.rs b/src/libtest/helpers/exit_code.rs new file mode 100644 index 0000000000000..831bef3b118ac --- /dev/null +++ b/src/libtest/helpers/exit_code.rs @@ -0,0 +1,20 @@ +//! Helper module to detect subprocess exit code. + +use std::process::ExitStatus; + +#[cfg(not(unix))] +pub fn get_exit_code(status: ExitStatus) -> Result { + status.code().ok_or("received no exit code from child process".into()) +} + +#[cfg(unix)] +pub fn get_exit_code(status: ExitStatus) -> Result { + use std::os::unix::process::ExitStatusExt; + match status.code() { + Some(code) => Ok(code), + None => match status.signal() { + Some(signal) => Err(format!("child process exited with signal {}", signal)), + None => Err("child process exited with unknown signal".into()), + } + } +} diff --git a/src/libtest/helpers/mod.rs b/src/libtest/helpers/mod.rs index 0bbe77b1c50af..6a2ef6086cb92 100644 --- a/src/libtest/helpers/mod.rs +++ b/src/libtest/helpers/mod.rs @@ -4,3 +4,5 @@ pub mod concurrency; pub mod isatty; pub mod metrics; +pub mod sink; +pub mod exit_code; diff --git a/src/libtest/helpers/sink.rs b/src/libtest/helpers/sink.rs new file mode 100644 index 0000000000000..aa7fe2487730e --- /dev/null +++ b/src/libtest/helpers/sink.rs @@ -0,0 +1,24 @@ +//! Module providing a helper structure to capture output in subprocesses. 
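+//!
+//! For example, `benchmark()` in `bench.rs` installs it as
+//! `io::set_print(Some(Sink::new_boxed(&data)))`, redirecting captured output
+//! into a shared `Arc<Mutex<Vec<u8>>>` buffer while a test or benchmark runs.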
+ +use std::{ + io, + io::prelude::Write, + sync::{Arc, Mutex}, +}; + +pub struct Sink(Arc<Mutex<Vec<u8>>>); + +impl Sink { + pub fn new_boxed(data: &Arc<Mutex<Vec<u8>>>) -> Box<Self> { + Box::new(Self(data.clone())) + } +} + +impl Write for Sink { + fn write(&mut self, data: &[u8]) -> io::Result<usize> { + Write::write(&mut *self.0.lock().unwrap(), data) + } + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index 6221140f60698..31da97b736a29 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -30,30 +30,13 @@ #![feature(termination_trait_lib)] #![feature(test)] +// Public reexports pub use self::ColorConfig::*; -use self::event::TestEvent::*; +pub use self::types::*; pub use self::types::TestName::*; +pub use self::options::{Options, ShouldPanic}; -use std::borrow::Cow; -use std::env; -use std::io; -use std::io::prelude::*; -use std::panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}; -use std::process; -use std::process::{ExitStatus, Command, Termination}; -use std::sync::mpsc::{channel, Sender}; -use std::sync::{Arc, Mutex}; -use std::thread; -use std::time::{Duration, Instant}; - -#[cfg(test)] -mod tests; - -const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode - -const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE"; - -// to be used by rustc to compile tests in libtest +// Module to be used by rustc to compile tests in libtest pub mod test { pub use crate::{ bench::Bencher, @@ -61,7 +44,7 @@ pub mod test { helpers::metrics::{Metric, MetricMap}, options::{ShouldPanic, Options, RunIgnored, RunStrategy}, test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk}, - time::TestTimeOptions, + time::{TestTimeOptions, TestExecTime}, types::{ DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestType, @@ -70,18 +53,21 @@ pub mod test { }; } -use bench::*; -use test_result::*; -use types::*; -use options::*; -use cli::*; -use event::*; - -use helpers::concurrency::get_concurrency; +use std::{ + env, + io, + io::prelude::Write, + panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}, + process, + process::{Command, Termination}, + sync::mpsc::{channel, Sender}, + sync::{Arc, Mutex}, + thread, + time::{Duration, Instant}, +}; -mod formatters; pub mod stats; - +mod formatters; mod cli; mod console; mod event; @@ -92,14 +78,31 @@ mod options; mod bench; mod test_result; +#[cfg(test)] +mod tests; + +use test_result::*; +use time::TestExecTime; +use options::{RunStrategy, Concurrent, RunIgnored, ColorConfig}; +use event::{CompletedTest, TestEvent}; +use cli::TestOpts; +use helpers::sink::Sink; +use helpers::concurrency::get_concurrency; +use helpers::exit_code::get_exit_code; + +// Process exit code to be used to indicate test failures. +const ERROR_EXIT_CODE: i32 = 101; + +const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE"; + // The default console test runner. It accepts the command line // arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec, options: Option) { - let mut opts = match parse_opts(args) { + let mut opts = match cli::parse_opts(args) { Some(Ok(o)) => o, Some(Err(msg)) => { eprintln!("error: {}", msg); - process::exit(101); + process::exit(ERROR_EXIT_CODE); } None => return, }; @@ -109,15 +112,15 @@ pub fn test_main(args: &[String], tests: Vec, options: Option {} - Ok(false) => process::exit(101), + Ok(false) => process::exit(ERROR_EXIT_CODE), Err(e) => { eprintln!("error: io error when listing tests: {:?}", e); - process::exit(101); + process::exit(ERROR_EXIT_CODE); } } } @@ -196,19 +199,7 @@ pub fn assert_test_result(result: T) { ); } -pub type MonitorMsg = (TestDesc, TestResult, Option, Vec); - -struct Sink(Arc>>); -impl Write for Sink { - fn write(&mut self, data: &[u8]) -> io::Result { - Write::write(&mut *self.0.lock().unwrap(), data) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -pub fn run_tests(opts: &TestOpts, tests: Vec, mut callback: F) -> io::Result<()> +pub fn run_tests(opts: &TestOpts, tests: Vec, mut notify_about_test_event: F) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()>, { @@ -236,11 +227,13 @@ where }; let filtered_out = tests_len - filtered_tests.len(); - callback(TeFilteredOut(filtered_out))?; + let event = TestEvent::TeFilteredOut(filtered_out); + notify_about_test_event(event)?; let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect(); - callback(TeFiltered(filtered_descs))?; + let event = TestEvent::TeFiltered(filtered_descs); + notify_about_test_event(event)?; let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests.into_iter().partition(|e| match e.testfn { @@ -254,7 +247,7 @@ where remaining.reverse(); let mut pending = 0; - let (tx, rx) = channel::(); + let (tx, rx) = channel::(); let run_strategy = if opts.options.panic_abort { RunStrategy::SpawnPrimary } else { @@ -295,10 +288,13 @@ where if concurrency == 1 { while !remaining.is_empty() { let test = remaining.pop().unwrap(); - callback(TeWait(test.desc.clone()))?; + let event = TestEvent::TeWait(test.desc.clone()); + notify_about_test_event(event)?; run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No); - let (test, result, exec_time, stdout) = rx.recv().unwrap(); - callback(TeResult(test, result, exec_time, stdout))?; + let completed_test = rx.recv().unwrap(); + + let event = TestEvent::TeResult(completed_test); + notify_about_test_event(event)?; } } else { while pending > 0 || !remaining.is_empty() { @@ -306,7 +302,9 @@ where let test = remaining.pop().unwrap(); let timeout = time::get_default_test_timeout(); running_tests.insert(test.desc.clone(), timeout); - callback(TeWait(test.desc.clone()))?; //here no pad + + let event = TestEvent::TeWait(test.desc.clone()); + notify_about_test_event(event)?; //here no pad run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes); pending += 1; } @@ -316,10 +314,18 @@ where if let Some(timeout) = calc_timeout(&running_tests) { res = rx.recv_timeout(timeout); for test in get_timed_out_tests(&mut running_tests) { - callback(TeTimeout(test))?; + let event = TestEvent::TeTimeout(test); + notify_about_test_event(event)?; } - if res != Err(RecvTimeoutError::Timeout) { - break; + + match res { + Err(RecvTimeoutError::Timeout) => { + // Result is not yet ready, continue waiting. + } + _ => { + // We've got a result, stop the loop. 
+ break; + } } } else { res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected); @@ -327,10 +333,11 @@ where } } - let (desc, result, exec_time, stdout) = res.unwrap(); - running_tests.remove(&desc); + let completed_test = res.unwrap(); + running_tests.remove(&completed_test.desc); - callback(TeResult(desc, result, exec_time, stdout))?; + let event = TestEvent::TeResult(completed_test); + notify_about_test_event(event)?; pending -= 1; } } @@ -338,10 +345,13 @@ where if opts.bench_benchmarks { // All benchmarks run at the end, in serial. for b in filtered_benchs { - callback(TeWait(b.desc.clone()))?; + let event = TestEvent::TeWait(b.desc.clone()); + notify_about_test_event(event)?; run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No); - let (test, result, exec_time, stdout) = rx.recv().unwrap(); - callback(TeResult(test, result, exec_time, stdout))?; + let completed_test = rx.recv().unwrap(); + + let event = TestEvent::TeResult(completed_test); + notify_about_test_event(event)?; } } Ok(()) @@ -420,7 +430,7 @@ pub fn run_test( force_ignore: bool, test: TestDescAndFn, strategy: RunStrategy, - monitor_ch: Sender, + monitor_ch: Sender, concurrency: Concurrent, ) { let TestDescAndFn { desc, testfn } = test; @@ -430,7 +440,8 @@ pub fn run_test( && (cfg!(target_arch = "wasm32") || cfg!(target_os = "emscripten")); if force_ignore || desc.ignore || ignore_because_no_process_support { - monitor_ch.send((desc, TrIgnored, None, Vec::new())).unwrap(); + let message = CompletedTest::new(desc, TrIgnored, None, Vec::new()); + monitor_ch.send(message).unwrap(); return; } @@ -443,7 +454,7 @@ pub fn run_test( fn run_test_inner( desc: TestDesc, - monitor_ch: Sender, + monitor_ch: Sender, testfn: Box, opts: TestRunOpts, ) { @@ -530,7 +541,7 @@ fn run_test_in_process( nocapture: bool, report_time: bool, testfn: Box, - monitor_ch: Sender, + monitor_ch: Sender, time_opts: Option, ) { // Buffer for capturing standard I/O @@ -538,8 +549,8 @@ fn run_test_in_process( let oldio = if !nocapture { Some(( - io::set_print(Some(Box::new(Sink(data.clone())))), - io::set_panic(Some(Box::new(Sink(data.clone())))), + io::set_print(Some(Sink::new_boxed(&data))), + io::set_panic(Some(Sink::new_boxed(&data))), )) } else { None @@ -553,7 +564,7 @@ fn run_test_in_process( let result = catch_unwind(AssertUnwindSafe(testfn)); let exec_time = start.map(|start| { let duration = start.elapsed(); - time::TestExecTime(duration) + TestExecTime(duration) }); if let Some((printio, panicio)) = oldio { @@ -566,13 +577,14 @@ fn run_test_in_process( Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time), }; let stdout = data.lock().unwrap().to_vec(); - monitor_ch.send((desc.clone(), test_result, exec_time, stdout)).unwrap(); + let message = CompletedTest::new(desc.clone(), test_result, exec_time, stdout); + monitor_ch.send(message).unwrap(); } fn spawn_test_subprocess( desc: TestDesc, report_time: bool, - monitor_ch: Sender, + monitor_ch: Sender, time_opts: Option, ) { let (result, test_output, exec_time) = (|| { @@ -595,7 +607,7 @@ fn spawn_test_subprocess( }; let exec_time = start.map(|start| { let duration = start.elapsed(); - time::TestExecTime(duration) + TestExecTime(duration) }); let std::process::Output { stdout, stderr, status } = output; @@ -617,7 +629,8 @@ fn spawn_test_subprocess( (result, test_output, exec_time) })(); - monitor_ch.send((desc.clone(), result, exec_time, test_output)).unwrap(); + let message = CompletedTest::new(desc.clone(), result, exec_time, test_output); + 
monitor_ch.send(message).unwrap(); } fn run_test_in_spawned_subprocess( @@ -653,20 +666,3 @@ fn run_test_in_spawned_subprocess( record_result(None); unreachable!("panic=abort callback should have exited the process") } - -#[cfg(not(unix))] -fn get_exit_code(status: ExitStatus) -> Result { - status.code().ok_or("received no exit code from child process".into()) -} - -#[cfg(unix)] -fn get_exit_code(status: ExitStatus) -> Result { - use std::os::unix::process::ExitStatusExt; - match status.code() { - Some(code) => Ok(code), - None => match status.signal() { - Some(signal) => Err(format!("child process exited with signal {}", signal)), - None => Err("child process exited with unknown signal".into()), - } - } -} diff --git a/src/libtest/stats/tests.rs b/src/libtest/stats/tests.rs index 7d1d635186fba..eaf41bc9e2255 100644 --- a/src/libtest/stats/tests.rs +++ b/src/libtest/stats/tests.rs @@ -4,7 +4,7 @@ extern crate test; use std::f64; use std::io::prelude::*; use std::io; -use self::test::Bencher; +use self::test::test::Bencher; // Test vectors generated from R, using the script src/etc/stat-test-vectors.r. diff --git a/src/libtest/tests.rs b/src/libtest/tests.rs index 5f7150a8eeba2..f6470b40a391d 100644 --- a/src/libtest/tests.rs +++ b/src/libtest/tests.rs @@ -1,11 +1,18 @@ use super::*; -use crate::test::{ - filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy, - // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions, - // TestType, TrFailedMsg, TrIgnored, TrOk, - ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, - TrIgnored, TrOk, +use crate::{ + bench::Bencher, + console::OutputLocation, + options::OutputFormat, + time::{TimeThreshold, TestTimeOptions}, + formatters::PrettyFormatter, + test::{ + filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy, + // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions, + // TestType, TrFailedMsg, TrIgnored, TrOk, + ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, + TrIgnored, TrOk, + }, }; use std::sync::mpsc::channel; use std::time::Duration; @@ -74,8 +81,8 @@ pub fn do_not_run_ignored_tests() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res != TrOk); + let result = rx.recv().unwrap().result; + assert!(result != TrOk); } #[test] @@ -93,8 +100,8 @@ pub fn ignored_tests_result_in_ignored() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res == TrIgnored); + let result = rx.recv().unwrap().result; + assert!(result == TrIgnored); } // FIXME: Re-enable emscripten once it can catch panics again @@ -116,8 +123,8 @@ fn test_should_panic() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res == TrOk); + let result = rx.recv().unwrap().result; + assert!(result == TrOk); } // FIXME: Re-enable emscripten once it can catch panics again @@ -139,8 +146,8 @@ fn test_should_panic_good_message() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res == TrOk); + let result = rx.recv().unwrap().result; + assert!(result == TrOk); } // 
FIXME: Re-enable emscripten once it can catch panics again @@ -165,8 +172,8 @@ fn test_should_panic_bad_message() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected))); + let result = rx.recv().unwrap().result; + assert!(result == TrFailedMsg(format!("{} '{}'", failed_msg, expected))); } // FIXME: Re-enable emscripten once it can catch panics again @@ -186,8 +193,8 @@ fn test_should_panic_but_succeeds() { }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, res, _, _) = rx.recv().unwrap(); - assert!(res == TrFailedMsg("test did not panic as expected".to_string())); + let result = rx.recv().unwrap().result; + assert!(result == TrFailedMsg("test did not panic as expected".to_string())); } fn report_time_test_template(report_time: bool) -> Option { @@ -214,7 +221,7 @@ fn report_time_test_template(report_time: bool) -> Option { }; let (tx, rx) = channel(); run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, _, exec_time, _) = rx.recv().unwrap(); + let exec_time = rx.recv().unwrap().exec_time; exec_time } @@ -252,7 +259,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult { }; let (tx, rx) = channel(); run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No); - let (_, result, _, _) = rx.recv().unwrap(); + let result = rx.recv().unwrap().result; result } @@ -658,9 +665,9 @@ fn should_sort_failures_before_printing_them() { test_type: TestType::Unknown, }; - let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false, None); + let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None); - let st = ConsoleTestState { + let st = console::ConsoleTestState { log_out: None, total: 0, passed: 0, @@ -678,8 +685,8 @@ fn should_sort_failures_before_printing_them() { out.write_failures(&st).unwrap(); let s = match out.output_location() { - &Raw(ref m) => String::from_utf8_lossy(&m[..]), - &Pretty(_) => unreachable!(), + &OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]), + &OutputLocation::Pretty(_) => unreachable!(), }; let apos = s.find("a").unwrap(); diff --git a/src/libtest/time.rs b/src/libtest/time.rs index b7ce764505bfe..83a545470efaa 100644 --- a/src/libtest/time.rs +++ b/src/libtest/time.rs @@ -61,7 +61,7 @@ pub fn get_default_test_timeout() -> Instant { } /// The meassured execution time of a unit test. -#[derive(Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct TestExecTime(pub Duration); impl fmt::Display for TestExecTime { From ddc6a5fd0e3e56f2ccbc576fdeed149579f141de Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 19:10:17 +0300 Subject: [PATCH 4/8] Split options parsing into several functions --- src/libtest/cli.rs | 302 +++++++++++++++++++++++++++------------------ 1 file changed, 179 insertions(+), 123 deletions(-) diff --git a/src/libtest/cli.rs b/src/libtest/cli.rs index b35193701d6ef..0c47bc8ae94a7 100644 --- a/src/libtest/cli.rs +++ b/src/libtest/cli.rs @@ -40,7 +40,7 @@ impl TestOpts { /// Result of parsing the options. pub type OptRes = Result; /// Result of parsing the option part. 
-type OptPartRes = Result, String>; +type OptPartRes = Result; fn optgroups() -> getopts::Options { let mut opts = getopts::Options::new(); @@ -186,14 +186,30 @@ Test Attributes: ); } -// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566 -fn is_nightly() -> bool { - // Whether this is a feature-staged build, i.e., on the beta or stable channel - let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); - // Whether we should enable unstable features for bootstrapping - let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); +/// Parses command line arguments into test options. +/// Returns `None` if help was requested (since we only show help message and don't run tests), +/// returns `Some(Err(..))` if provided arguments are incorrect, +/// otherwise creates a `TestOpts` object and returns it. +pub fn parse_opts(args: &[String]) -> Option { + // Parse matches. + let opts = optgroups(); + let args = args.get(1..).unwrap_or(args); + let matches = match opts.parse(args) { + Ok(m) => m, + Err(f) => return Some(Err(f.to_string())), + }; - bootstrap || !disable_unstable_features + // Check if help was requested. + if matches.opt_present("h") { + // Show help and do nothing more. + usage(&args[0], &opts); + return None; + } + + // Actually parse the opts. + let opts_result = parse_opts_impl(matches); + + Some(opts_result) } // Gets the option value and checks if unstable features are enabled. @@ -201,21 +217,80 @@ macro_rules! unstable_optflag { ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{ let opt = $matches.opt_present($option_name); if !$allow_unstable && opt { - return Some(Err(format!( + return Err(format!( "The \"{}\" flag is only accepted on the nightly compiler", $option_name - ))); + )); } opt }}; } +// Implementation of `parse_opts` that doesn't care about help message +// and returns a `Result`. +fn parse_opts_impl(matches: getopts::Matches) -> OptRes { + let allow_unstable = get_allow_unstable(&matches)?; + + // Unstable flags + let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic"); + let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored"); + let time_options = get_time_options(&matches, allow_unstable)?; + + let quiet = matches.opt_present("quiet"); + let exact = matches.opt_present("exact"); + let list = matches.opt_present("list"); + let skip = matches.opt_strs("skip"); + + let bench_benchmarks = matches.opt_present("bench"); + let run_tests = !bench_benchmarks || matches.opt_present("test"); + + let logfile = get_log_file(&matches)?; + let run_ignored = get_run_ignored(&matches, include_ignored)?; + let filter = get_filter(&matches)?; + let nocapture = get_nocapture(&matches)?; + let test_threads = get_test_threads(&matches)?; + let color = get_color_config(&matches)?; + let format = get_format(&matches, quiet, allow_unstable)?; + + let options = Options::new().display_output(matches.opt_present("show-output")); + + let test_opts = TestOpts { + list, + filter, + filter_exact: exact, + exclude_should_panic, + run_ignored, + run_tests, + bench_benchmarks, + logfile, + nocapture, + color, + format, + test_threads, + skip, + time_options, + options, + }; + + Ok(test_opts) +} + +// FIXME: Copied from libsyntax until linkage errors are resolved. 
Issue #47566 +fn is_nightly() -> bool { + // Whether this is a feature-staged build, i.e., on the beta or stable channel + let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); + // Whether we should enable unstable features for bootstrapping + let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); + + bootstrap || !disable_unstable_features +} + // Gets the CLI options assotiated with `report-time` feature. fn get_time_options( matches: &getopts::Matches, allow_unstable: bool) --> Option> { +-> OptPartRes> { let report_time = unstable_optflag!(matches, allow_unstable, "report-time"); let colored_opt_str = matches.opt_str("report-time"); let mut report_time_colored = report_time && colored_opt_str == Some("colored".into()); @@ -232,71 +307,73 @@ fn get_time_options( None }; - Some(Ok(options)) + Ok(options) } -// Parses command line arguments into test options -pub fn parse_opts(args: &[String]) -> Option { - let mut allow_unstable = false; - let opts = optgroups(); - let args = args.get(1..).unwrap_or(args); - let matches = match opts.parse(args) { - Ok(m) => m, - Err(f) => return Some(Err(f.to_string())), +fn get_test_threads(matches: &getopts::Matches) -> OptPartRes> { + let test_threads = match matches.opt_str("test-threads") { + Some(n_str) => match n_str.parse::() { + Ok(0) => return Err("argument for --test-threads must not be 0".to_string()), + Ok(n) => Some(n), + Err(e) => { + return Err(format!( + "argument for --test-threads must be a number > 0 \ + (error: {})", + e + )); + } + }, + None => None, }; - if let Some(opt) = matches.opt_str("Z") { - if !is_nightly() { - return Some(Err( - "the option `Z` is only accepted on the nightly compiler".into(), - )); - } + Ok(test_threads) +} - match &*opt { - "unstable-options" => { - allow_unstable = true; - } - _ => { - return Some(Err("Unrecognized option to `Z`".into())); +fn get_format(matches: &getopts::Matches, quiet: bool, allow_unstable: bool) -> OptPartRes { + let format = match matches.opt_str("format").as_ref().map(|s| &**s) { + None if quiet => OutputFormat::Terse, + Some("pretty") | None => OutputFormat::Pretty, + Some("terse") => OutputFormat::Terse, + Some("json") => { + if !allow_unstable { + return Err( + "The \"json\" format is only accepted on the nightly compiler".into(), + ); } + OutputFormat::Json } - }; - if matches.opt_present("h") { - usage(&args[0], &opts); - return None; - } - - let filter = if !matches.free.is_empty() { - Some(matches.free[0].clone()) - } else { - None + Some(v) => { + return Err(format!( + "argument for --format must be pretty, terse, or json (was \ + {})", + v + )); + } }; - let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic"); + Ok(format) +} - let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored"); +fn get_color_config(matches: &getopts::Matches) -> OptPartRes { + let color = match matches.opt_str("color").as_ref().map(|s| &**s) { + Some("auto") | None => ColorConfig::AutoColor, + Some("always") => ColorConfig::AlwaysColor, + Some("never") => ColorConfig::NeverColor, - let run_ignored = match (include_ignored, matches.opt_present("ignored")) { - (true, true) => { - return Some(Err( - "the options --include-ignored and --ignored are mutually exclusive".into(), + Some(v) => { + return Err(format!( + "argument for --color must be auto, always, or never (was \ + {})", + v )); } - (true, false) => RunIgnored::Yes, - (false, true) => RunIgnored::Only, - (false, false) => RunIgnored::No, }; - let 
quiet = matches.opt_present("quiet"); - let exact = matches.opt_present("exact"); - let list = matches.opt_present("list"); - let logfile = matches.opt_str("logfile"); - let logfile = logfile.map(|s| PathBuf::from(&s)); - - let bench_benchmarks = matches.opt_present("bench"); - let run_tests = !bench_benchmarks || matches.opt_present("test"); + Ok(color) +} +fn get_nocapture(matches: &getopts::Matches) -> OptPartRes { let mut nocapture = matches.opt_present("nocapture"); if !nocapture { nocapture = match env::var("RUST_TEST_NOCAPTURE") { @@ -305,80 +382,59 @@ pub fn parse_opts(args: &[String]) -> Option { }; } - let time_options = match get_time_options(&matches, allow_unstable) { - Some(Ok(val)) => val, - Some(Err(e)) => return Some(Err(e)), - None => panic!("Unexpected output from `get_time_options`"), - }; + Ok(nocapture) +} - let test_threads = match matches.opt_str("test-threads") { - Some(n_str) => match n_str.parse::() { - Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())), - Ok(n) => Some(n), - Err(e) => { - return Some(Err(format!( - "argument for --test-threads must be a number > 0 \ - (error: {})", - e - ))); - } - }, - None => None, +fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes { + let run_ignored = match (include_ignored, matches.opt_present("ignored")) { + (true, true) => { + return Err( + "the options --include-ignored and --ignored are mutually exclusive".into(), + ); + } + (true, false) => RunIgnored::Yes, + (false, true) => RunIgnored::Only, + (false, false) => RunIgnored::No, }; - let color = match matches.opt_str("color").as_ref().map(|s| &**s) { - Some("auto") | None => ColorConfig::AutoColor, - Some("always") => ColorConfig::AlwaysColor, - Some("never") => ColorConfig::NeverColor, + Ok(run_ignored) +} - Some(v) => { - return Some(Err(format!( - "argument for --color must be auto, always, or never (was \ - {})", - v - ))); - } +fn get_filter(matches: &getopts::Matches) -> OptPartRes> { + let filter = if !matches.free.is_empty() { + Some(matches.free[0].clone()) + } else { + None }; - let format = match matches.opt_str("format").as_ref().map(|s| &**s) { - None if quiet => OutputFormat::Terse, - Some("pretty") | None => OutputFormat::Pretty, - Some("terse") => OutputFormat::Terse, - Some("json") => { - if !allow_unstable { - return Some(Err( - "The \"json\" format is only accepted on the nightly compiler".into(), - )); - } - OutputFormat::Json + Ok(filter) +} + +fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes { + let mut allow_unstable = false; + + if let Some(opt) = matches.opt_str("Z") { + if !is_nightly() { + return Err( + "the option `Z` is only accepted on the nightly compiler".into(), + ); } - Some(v) => { - return Some(Err(format!( - "argument for --format must be pretty, terse, or json (was \ - {})", - v - ))); + match &*opt { + "unstable-options" => { + allow_unstable = true; + } + _ => { + return Err("Unrecognized option to `Z`".into()); + } } }; - let test_opts = TestOpts { - list, - filter, - filter_exact: exact, - exclude_should_panic, - run_ignored, - run_tests, - bench_benchmarks, - logfile, - nocapture, - color, - format, - test_threads, - skip: matches.opt_strs("skip"), - time_options, - options: Options::new().display_output(matches.opt_present("show-output")), - }; + Ok(allow_unstable) +} + +fn get_log_file(matches: &getopts::Matches) -> OptPartRes> { + let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s)); - Some(Ok(test_opts)) + Ok(logfile) } From 
85628e80637cf21caa9d6fef31d9fed53e7156aa Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 19:10:35 +0300 Subject: [PATCH 5/8] Add more explaining comments to the code --- src/libtest/console.rs | 148 ++++++++++++++++++++----------------- src/libtest/options.rs | 10 +++ src/libtest/test_result.rs | 4 +- 3 files changed, 94 insertions(+), 68 deletions(-) diff --git a/src/libtest/console.rs b/src/libtest/console.rs index 2c14e9a1591c7..e17030726ceaa 100644 --- a/src/libtest/console.rs +++ b/src/libtest/console.rs @@ -7,21 +7,23 @@ use std::io; use term; use super::{ + bench::fmt_bench_samples, + cli::TestOpts, + event::{TestEvent, CompletedTest}, + formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}, helpers::{ concurrency::get_concurrency, metrics::MetricMap, }, types::{TestDesc, TestDescAndFn, NamePadding}, options::{Options, OutputFormat}, - bench::fmt_bench_samples, test_result::TestResult, time::TestExecTime, - cli::TestOpts, - event::TestEvent, run_tests, filter_tests, }; +/// Generic wrapper over stdout. pub enum OutputLocation { Pretty(Box), Raw(T), @@ -43,8 +45,6 @@ impl Write for OutputLocation { } } -use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}; - pub struct ConsoleTestState { pub log_out: Option, pub total: usize, @@ -190,65 +190,77 @@ pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Res Ok(()) } -// A simple console test runner -pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { - // A callback handling events that occure during test execution. - fn on_test_event( - event: &TestEvent, - st: &mut ConsoleTestState, - out: &mut dyn OutputFormatter, - ) -> io::Result<()> { - match (*event).clone() { - TestEvent::TeFiltered(ref filtered_tests) => { - st.total = filtered_tests.len(); - out.write_run_start(filtered_tests.len()) - } - TestEvent::TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out), - TestEvent::TeWait(ref test) => out.write_test_start(test), - TestEvent::TeTimeout(ref test) => out.write_timeout(test), - TestEvent::TeResult(completed_test) => { - let test = completed_test.desc; - let result = &completed_test.result; - let exec_time = &completed_test.exec_time; - let stdout = completed_test.stdout; - - st.write_log_result(&test, result, exec_time.as_ref())?; - out.write_result(&test, result, exec_time.as_ref(), &*stdout, st)?; - match result { - TestResult::TrOk => { - st.passed += 1; - st.not_failures.push((test, stdout)); - } - TestResult::TrIgnored => st.ignored += 1, - TestResult::TrAllowedFail => st.allowed_fail += 1, - TestResult::TrBench(bs) => { - st.metrics.insert_metric( - test.name.as_slice(), - bs.ns_iter_summ.median, - bs.ns_iter_summ.max - bs.ns_iter_summ.min, - ); - st.measured += 1 - } - TestResult::TrFailed => { - st.failed += 1; - st.failures.push((test, stdout)); - } - TestResult::TrFailedMsg(msg) => { - st.failed += 1; - let mut stdout = stdout; - stdout.extend_from_slice(format!("note: {}", msg).as_bytes()); - st.failures.push((test, stdout)); - } - TestResult::TrTimedFail => { - st.failed += 1; - st.time_failures.push((test, stdout)); - } - } - Ok(()) - } +// Updates `ConsoleTestState` depending on result of the test execution. 
+fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) { + let test = completed_test.desc; + let stdout = completed_test.stdout; + match completed_test.result { + TestResult::TrOk => { + st.passed += 1; + st.not_failures.push((test, stdout)); + } + TestResult::TrIgnored => st.ignored += 1, + TestResult::TrAllowedFail => st.allowed_fail += 1, + TestResult::TrBench(bs) => { + st.metrics.insert_metric( + test.name.as_slice(), + bs.ns_iter_summ.median, + bs.ns_iter_summ.max - bs.ns_iter_summ.min, + ); + st.measured += 1 + } + TestResult::TrFailed => { + st.failed += 1; + st.failures.push((test, stdout)); + } + TestResult::TrFailedMsg(msg) => { + st.failed += 1; + let mut stdout = stdout; + stdout.extend_from_slice(format!("note: {}", msg).as_bytes()); + st.failures.push((test, stdout)); + } + TestResult::TrTimedFail => { + st.failed += 1; + st.time_failures.push((test, stdout)); + } + } +} + +// Handler for events that occur during test execution. +// It is provided as a callback to the `run_tests` function. +fn on_test_event( + event: &TestEvent, + st: &mut ConsoleTestState, + out: &mut dyn OutputFormatter, +) -> io::Result<()> { + match (*event).clone() { + TestEvent::TeFiltered(ref filtered_tests) => { + st.total = filtered_tests.len(); + out.write_run_start(filtered_tests.len())?; + } + TestEvent::TeFilteredOut(filtered_out) => { + st.filtered_out = filtered_out; + } + TestEvent::TeWait(ref test) => out.write_test_start(test)?, + TestEvent::TeTimeout(ref test) => out.write_timeout(test)?, + TestEvent::TeResult(completed_test) => { + let test = &completed_test.desc; + let result = &completed_test.result; + let exec_time = &completed_test.exec_time; + let stdout = &completed_test.stdout; + + st.write_log_result(test, result, exec_time.as_ref())?; + out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?; + handle_test_result(st, completed_test); } } + Ok(()) +} + +/// A simple console test runner. +/// Runs provided tests reporting process and results to the stdout. +pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { let output = match term::stdout() { None => OutputLocation::Raw(io::stdout()), Some(t) => OutputLocation::Pretty(t), @@ -279,12 +291,6 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu OutputFormat::Json => Box::new(JsonFormatter::new(output)), }; let mut st = ConsoleTestState::new(opts)?; - fn len_if_padded(t: &TestDescAndFn) -> usize { - match t.testfn.padding() { - NamePadding::PadNone => 0, - NamePadding::PadOnRight => t.desc.name.as_slice().len(), - } - } run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?; @@ -292,3 +298,11 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Resu return out.write_run_finish(&st); } + +// Calculates padding for given test description. 
+fn len_if_padded(t: &TestDescAndFn) -> usize { + match t.testfn.padding() { + NamePadding::PadNone => 0, + NamePadding::PadOnRight => t.desc.name.as_slice().len(), + } +} diff --git a/src/libtest/options.rs b/src/libtest/options.rs index 0a604cae0ca33..ec87b0fcd463b 100644 --- a/src/libtest/options.rs +++ b/src/libtest/options.rs @@ -7,12 +7,14 @@ pub enum Concurrent { No, } +/// Number of times to run a benchmarked function #[derive(Clone, PartialEq, Eq)] pub enum BenchMode { Auto, Single, } +/// Whether the test is expected to panic or not #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum ShouldPanic { No, @@ -20,6 +22,7 @@ pub enum ShouldPanic { YesWithMessage(&'static str), } +/// Whether console output should be colored or not #[derive(Copy, Clone, Debug)] pub enum ColorConfig { AutoColor, @@ -27,17 +30,23 @@ pub enum ColorConfig { NeverColor, } +/// Format of the test results output #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum OutputFormat { + /// Verbose output Pretty, + /// Quiet output Terse, + /// JSON output Json, } +/// Whether ignored tests should be run or not #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum RunIgnored { Yes, No, + /// Run only ignored tests Only, } @@ -53,6 +62,7 @@ pub enum RunStrategy { SpawnPrimary, } +/// Options for the test run defined by the caller (instead of CLI arguments). /// In case we want to add other options as well, just add them in this struct. #[derive(Copy, Clone, Debug)] pub struct Options { diff --git a/src/libtest/test_result.rs b/src/libtest/test_result.rs index 4eb3f93e2a42b..dd4dfd9997f09 100644 --- a/src/libtest/test_result.rs +++ b/src/libtest/test_result.rs @@ -27,7 +27,8 @@ pub enum TestResult { unsafe impl Send for TestResult {} - +/// Creates a `TestResult` depending on the raw result of test execution +/// and associated data. pub fn calc_result<'a>( desc: &TestDesc, task_result: Result<(), &'a (dyn Any + 'static + Send)>, @@ -73,6 +74,7 @@ pub fn calc_result<'a>( result } +/// Creates a `TestResult` depending on the exit code of the test subprocess.
pub fn get_result_from_exit_code( desc: &TestDesc, code: i32, From cb5733de868600aab889730effe4077641a46981 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 17 Oct 2019 19:21:05 +0300 Subject: [PATCH 6/8] Improve code style --- src/libtest/cli.rs | 6 +++++- src/libtest/event.rs | 7 ++++++- src/libtest/formatters/json.rs | 3 ++- src/libtest/formatters/terse.rs | 4 +++- src/libtest/helpers/isatty.rs | 2 +- src/libtest/lib.rs | 12 ++++++++---- src/libtest/test_result.rs | 9 ++++++--- src/libtest/tests.rs | 15 ++++++++------- src/libtest/time.rs | 4 ++-- 9 files changed, 41 insertions(+), 21 deletions(-) diff --git a/src/libtest/cli.rs b/src/libtest/cli.rs index 0c47bc8ae94a7..f95d5aad18a65 100644 --- a/src/libtest/cli.rs +++ b/src/libtest/cli.rs @@ -329,7 +329,11 @@ fn get_test_threads(matches: &getopts::Matches) -> OptPartRes> { Ok(test_threads) } -fn get_format(matches: &getopts::Matches, quiet: bool, allow_unstable: bool) -> OptPartRes { +fn get_format( + matches: &getopts::Matches, + quiet: bool, + allow_unstable: bool +) -> OptPartRes { let format = match matches.opt_str("format").as_ref().map(|s| &**s) { None if quiet => OutputFormat::Terse, Some("pretty") | None => OutputFormat::Pretty, diff --git a/src/libtest/event.rs b/src/libtest/event.rs index e1b606149c505..eefbd2d6a813a 100644 --- a/src/libtest/event.rs +++ b/src/libtest/event.rs @@ -14,7 +14,12 @@ pub struct CompletedTest { } impl CompletedTest { - pub fn new(desc: TestDesc, result: TestResult, exec_time: Option, stdout: Vec) -> Self { + pub fn new( + desc: TestDesc, + result: TestResult, + exec_time: Option, + stdout: Vec + ) -> Self { Self { desc, result, diff --git a/src/libtest/formatters/json.rs b/src/libtest/formatters/json.rs index fc677036dabf2..b73d7349678a7 100644 --- a/src/libtest/formatters/json.rs +++ b/src/libtest/formatters/json.rs @@ -92,7 +92,8 @@ impl OutputFormatter for JsonFormatter { stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()> { - let stdout = if (state.options.display_output || *result != TestResult::TrOk) && stdout.len() > 0 { + let display_stdout = state.options.display_output || *result != TestResult::TrOk; + let stdout = if display_stdout && stdout.len() > 0 { Some(String::from_utf8_lossy(stdout)) } else { None diff --git a/src/libtest/formatters/terse.rs b/src/libtest/formatters/terse.rs index 90eb62251fb42..fe56157d9c10a 100644 --- a/src/libtest/formatters/terse.rs +++ b/src/libtest/formatters/terse.rs @@ -196,7 +196,9 @@ impl OutputFormatter for TerseFormatter { ) -> io::Result<()> { match *result { TestResult::TrOk => self.write_ok(), - TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => self.write_failed(), + TestResult::TrFailed + | TestResult::TrFailedMsg(_) + | TestResult::TrTimedFail => self.write_failed(), TestResult::TrIgnored => self.write_ignored(), TestResult::TrAllowedFail => self.write_allowed_fail(), TestResult::TrBench(ref bs) => { diff --git a/src/libtest/helpers/isatty.rs b/src/libtest/helpers/isatty.rs index 638328aea18cf..6e4954778e605 100644 --- a/src/libtest/helpers/isatty.rs +++ b/src/libtest/helpers/isatty.rs @@ -30,4 +30,4 @@ pub fn stdout_isatty() -> bool { let mut out = 0; GetConsoleMode(handle, &mut out) != 0 } -} \ No newline at end of file +} diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index 31da97b736a29..89f527b6bd763 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -46,8 +46,8 @@ pub mod test { test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk}, time::{TestTimeOptions, 
TestExecTime}, types::{ - DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, - TestName, TestType, + DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, + TestDesc, TestDescAndFn, TestName, TestType, }, assert_test_result, filter_tests, run_test, test_main, test_main_static, }; @@ -199,7 +199,11 @@ pub fn assert_test_result(result: T) { ); } -pub fn run_tests(opts: &TestOpts, tests: Vec, mut notify_about_test_event: F) -> io::Result<()> +pub fn run_tests( + opts: &TestOpts, + tests: Vec, + mut notify_about_test_event: F +) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()>, { @@ -325,7 +329,7 @@ where _ => { // We've got a result, stop the loop. break; - } + } } } else { res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected); diff --git a/src/libtest/test_result.rs b/src/libtest/test_result.rs index dd4dfd9997f09..80ca9dea18f5a 100644 --- a/src/libtest/test_result.rs +++ b/src/libtest/test_result.rs @@ -1,4 +1,3 @@ - use std::any::Any; use super::bench::BenchSamples; @@ -50,11 +49,15 @@ pub fn calc_result<'a>( if desc.allow_fail { TestResult::TrAllowedFail } else { - TestResult::TrFailedMsg(format!("panic did not include expected string '{}'", msg)) + TestResult::TrFailedMsg( + format!("panic did not include expected string '{}'", msg) + ) } } } - (&ShouldPanic::Yes, Ok(())) => TestResult::TrFailedMsg("test did not panic as expected".to_string()), + (&ShouldPanic::Yes, Ok(())) => { + TestResult::TrFailedMsg("test did not panic as expected".to_string()) + } _ if desc.allow_fail => TestResult::TrAllowedFail, _ => TestResult::TrFailed, }; diff --git a/src/libtest/tests.rs b/src/libtest/tests.rs index f6470b40a391d..9de774555e9cc 100644 --- a/src/libtest/tests.rs +++ b/src/libtest/tests.rs @@ -7,11 +7,12 @@ use crate::{ time::{TimeThreshold, TestTimeOptions}, formatters::PrettyFormatter, test::{ - filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy, + filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, + RunIgnored, RunStrategy, ShouldPanic, StaticTestName, TestDesc, + TestDescAndFn, TestOpts, TrIgnored, TrOk, + // FIXME (introduced by #65251) // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions, // TestType, TrFailedMsg, TrIgnored, TrOk, - ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, - TrIgnored, TrOk, }, }; use std::sync::mpsc::channel; @@ -104,7 +105,7 @@ pub fn ignored_tests_result_in_ignored() { assert!(result == TrIgnored); } -// FIXME: Re-enable emscripten once it can catch panics again +// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic() { @@ -127,7 +128,7 @@ fn test_should_panic() { assert!(result == TrOk); } -// FIXME: Re-enable emscripten once it can catch panics again +// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_good_message() { @@ -150,7 +151,7 @@ fn test_should_panic_good_message() { assert!(result == TrOk); } -// FIXME: Re-enable emscripten once it can catch panics again +// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_bad_message() { @@ -176,7 +177,7 @@ fn test_should_panic_bad_message() { assert!(result == TrFailedMsg(format!("{} '{}'", failed_msg, expected))); } -// FIXME: Re-enable 
emscripten once it can catch panics again +// FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_but_succeeds() { diff --git a/src/libtest/time.rs b/src/libtest/time.rs index 83a545470efaa..f4d4b17b620ba 100644 --- a/src/libtest/time.rs +++ b/src/libtest/time.rs @@ -2,7 +2,7 @@ //! execution. //! Two main purposes of this module: //! - Check whether test is timed out. -//! - Provide helpers for `report-time` and `measure-time` options. +//! - Provide helpers for `report-time` and `measure-time` options. use std::time::{Duration, Instant}; use std::str::FromStr; @@ -55,7 +55,7 @@ pub mod time_constants { } /// Returns an `Instance` object denoting when the test should be considered -/// timed out. +/// timed out. pub fn get_default_test_timeout() -> Instant { Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S) } From a06b205177e714fc68fec2a2b0efe5b57d4eb865 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Fri, 18 Oct 2019 08:32:44 +0300 Subject: [PATCH 7/8] Add public re-exports for benches --- src/libtest/bench.rs | 3 ++- src/libtest/lib.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libtest/bench.rs b/src/libtest/bench.rs index bb5b0d1da5337..c142c5213d2e0 100644 --- a/src/libtest/bench.rs +++ b/src/libtest/bench.rs @@ -1,4 +1,6 @@ //! Benchmarking module. +pub use std::hint::black_box; + use super::{ event::CompletedTest, helpers::sink::Sink, @@ -14,7 +16,6 @@ use std::cmp; use std::io; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::sync::{Arc, Mutex}; -use std::hint::black_box; /// Manager of the benchmarking runs. /// diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs index 89f527b6bd763..179558e8f9a18 100644 --- a/src/libtest/lib.rs +++ b/src/libtest/lib.rs @@ -35,6 +35,7 @@ pub use self::ColorConfig::*; pub use self::types::*; pub use self::types::TestName::*; pub use self::options::{Options, ShouldPanic}; +pub use self::bench::{Bencher, black_box}; // Module to be used by rustc to compile tests in libtest pub mod test { @@ -67,6 +68,7 @@ use std::{ }; pub mod stats; +pub mod bench; mod formatters; mod cli; mod console; @@ -75,7 +77,6 @@ mod helpers; mod time; mod types; mod options; -mod bench; mod test_result; #[cfg(test)] From ae04dc8473f9ea53b71123eb4eb0fcec71e6d797 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Mon, 21 Oct 2019 09:05:40 +0300 Subject: [PATCH 8/8] Remove unneccessary use under cfg(unix) --- src/libtest/helpers/concurrency.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/libtest/helpers/concurrency.rs b/src/libtest/helpers/concurrency.rs index f0292c2d2c792..61651a927c5f7 100644 --- a/src/libtest/helpers/concurrency.rs +++ b/src/libtest/helpers/concurrency.rs @@ -2,9 +2,6 @@ //! during tests execution. use std::env; -#[cfg(any(unix, target_os = "cloudabi"))] -use libc; - #[allow(deprecated)] pub fn get_concurrency() -> usize { return match env::var("RUST_TEST_THREADS") { @@ -12,10 +9,7 @@ pub fn get_concurrency() -> usize { let opt_n: Option = s.parse().ok(); match opt_n { Some(n) if n > 0 => n, - _ => panic!( - "RUST_TEST_THREADS is `{}`, should be a positive integer.", - s - ), + _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s), } } Err(..) 
=> num_cpus(), @@ -82,11 +76,7 @@ pub fn get_concurrency() -> usize { unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize } } - #[cfg(any( - target_os = "freebsd", - target_os = "dragonfly", - target_os = "netbsd" - ))] + #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "netbsd"))] fn num_cpus() -> usize { use std::ptr;