bench: Fix execution pre-validation #340

Merged
merged 5 commits into from
Jun 2, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 7 additions & 18 deletions circle.yml

@@ -214,7 +214,6 @@ jobs:
     executor: linux-clang-latest
     environment:
       BUILD_TYPE: Coverage
-      TESTS_FILTER: unittests
       CMAKE_OPTIONS: -DCMAKE_CXX_CLANG_TIDY=clang-tidy
     steps:
       - checkout
@@ -224,27 +223,17 @@
           name: "Collect coverage data"
           working_directory: ~/build
           command: |
+            binaries='-object bin/fizzy-unittests -object bin/fizzy-spectests -object bin/fizzy-bench'
+
            mkdir coverage
            find -name '*.profraw'
            llvm-profdata merge *.profraw -o fizzy.profdata

-            llvm-cov report -use-color -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt \
-                -object bin/fizzy-unittests \
-                -object bin/fizzy-spectests
-            llvm-cov report -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt > coverage/report.txt \
-                -object bin/fizzy-unittests \
-                -object bin/fizzy-spectests
-
-            llvm-cov show -format=html -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt -region-coverage-lt=100 > coverage/missing.html \
-                -object bin/fizzy-unittests \
-                -object bin/fizzy-spectests
-            llvm-cov show -format=html -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt > coverage/full.html \
-                -object bin/fizzy-unittests \
-                -object bin/fizzy-spectests
-
-            llvm-cov export -instr-profile fizzy.profdata -format=lcov > fizzy.lcov \
-                -object bin/fizzy-unittests \
-                -object bin/fizzy-spectests
+            llvm-cov report -use-color -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt $binaries
+            llvm-cov report -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt $binaries > coverage/report.txt
+            llvm-cov show -format=html -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt -region-coverage-lt=100 $binaries > coverage/missing.html
+            llvm-cov show -format=html -instr-profile fizzy.profdata -Xdemangler llvm-cxxfilt $binaries > coverage/full.html
+            llvm-cov export -instr-profile fizzy.profdata -format=lcov $binaries > fizzy.lcov
            genhtml fizzy.lcov -o coverage -t Fizzy
     - store_artifacts:
         path: ~/build/coverage
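The substance of this change: the repeated -object lists collapse into a single $binaries shell variable, and bin/fizzy-bench is now included among the coverage objects, so the smoke-test runs added in this PR contribute to the coverage reports.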
1 change: 1 addition & 0 deletions test/CMakeLists.txt

@@ -19,6 +19,7 @@ add_subdirectory(bench)
 add_subdirectory(bench_internal)
 add_subdirectory(spectests)
 add_subdirectory(unittests)
+add_subdirectory(smoketests)

 if(FIZZY_FUZZING)
     add_subdirectory(fuzzer)
62 changes: 42 additions & 20 deletions test/bench/bench.cpp

@@ -49,7 +49,7 @@ void benchmark_parse(

     // Pre-run for validation
     if (!engine->parse(wasm_binary))
-        return state.SkipWithError("Parsing failed");
+        state.SkipWithError("Parsing failed");

     const auto input_size = wasm_binary.size();
     auto num_bytes_parsed = uint64_t{0};
@@ -88,73 +88,95 @@ struct ExecutionBenchmarkCase

 /// Performs a test run of the benchmark case and checks results produced by the engine
 /// against the expected values.
-void validate_benchmark_case(benchmark::State& state, fizzy::test::WasmEngine& engine,
+bool validate_benchmark_case(benchmark::State& state, fizzy::test::WasmEngine& engine,
     const ExecutionBenchmarkCase& benchmark_case)
 {
     if (!engine.instantiate(*benchmark_case.wasm_binary))
-        return state.SkipWithError("Instantiation failed");
+    {
+        state.SkipWithError("Instantiation failed");
+        return false;
+    }

     const auto func_ref = engine.find_function(benchmark_case.func_name, benchmark_case.func_sig);
     if (!func_ref)
     {
-        return state.SkipWithError(
-            ("Function \"" + benchmark_case.func_name + "\" not found").c_str());
+        state.SkipWithError(("Function \"" + benchmark_case.func_name + "\" not found").c_str());
+        return false;
     }

-    if (!benchmark_case.memory.empty())
+    if (!benchmark_case.memory.empty() && !engine.init_memory(benchmark_case.memory))
     {
-        if (!engine.init_memory(benchmark_case.memory))
-            return state.SkipWithError("Memory initialization failed");
+        state.SkipWithError("Memory initialization failed");
+        return false;
     }

     // Execute once and check results against expectations.
     const auto result = engine.execute(*func_ref, benchmark_case.func_args);
     if (result.trapped)
-        return state.SkipWithError("Trapped");
+    {
+        state.SkipWithError("Trapped");
+        return false;
+    }

     if (benchmark_case.expected_result)
     {
         if (!result.value)
         {
             const auto error_msg = "Missing result value, expected: " +
                                    std::to_string(*benchmark_case.expected_result);
-            return state.SkipWithError(error_msg.c_str());
+            state.SkipWithError(error_msg.c_str());
+            return false;
         }
         else if (*result.value != *benchmark_case.expected_result)
         {
             const auto error_msg = "Incorrect result value, expected: " +
                                    std::to_string(*benchmark_case.expected_result) +
                                    ", got: " + std::to_string(*result.value);
-            return state.SkipWithError(error_msg.c_str());
+            state.SkipWithError(error_msg.c_str());
+            return false;
         }
     }
     else if (result.value)
     {
-        return state.SkipWithError(
-            ("Unexpected result value: " + std::to_string(*result.value)).c_str());
+        state.SkipWithError(("Unexpected result value: " + std::to_string(*result.value)).c_str());
+        return false;
     }
     const auto memory = engine.get_memory();
     if (memory.size() < benchmark_case.expected_memory.size())
-        return state.SkipWithError("Result memory is shorter than expected");
+    {
+        state.SkipWithError("Result memory is shorter than expected");
+        return false;
+    }

     // Compare _beginning_ segment of the memory with expected.
     // Specifying expected full memory pages is impractical.
     if (!std::equal(std::begin(benchmark_case.expected_memory),
             std::end(benchmark_case.expected_memory), std::begin(memory)))
-        return state.SkipWithError("Incorrect result memory");
+    {
+        state.SkipWithError("Incorrect result memory");
+        return false;
+    }
+
+    return true;
 }

 void benchmark_execute(
     benchmark::State& state, EngineCreateFn create_fn, const ExecutionBenchmarkCase& benchmark_case)
 {
     const auto engine = create_fn();
-    const auto has_memory = !benchmark_case.memory.empty();
-
-    validate_benchmark_case(state, *engine, benchmark_case);
+    const bool ok = validate_benchmark_case(state, *engine, benchmark_case);

-    engine->instantiate(*benchmark_case.wasm_binary);
-    const auto func_ref = engine->find_function(benchmark_case.func_name, benchmark_case.func_sig);
+    const auto has_memory = !benchmark_case.memory.empty();
+    std::optional<fizzy::test::WasmEngine::FuncRef> func_ref;
+    if (ok)
Collaborator:
Can you explain the changes here in this function? It looks like when validation is not ok, you would still go and dereference func_ref at line 185.

Collaborator:
Ah, I guess it works because validate_benchmark_case would call state.SkipWithError and the loop below will not be executed.

But why then not just early return?

  if (!validate_benchmark_case(...))
      return;

Collaborator:
Yes. The loop will not run, but we need to call `auto _ : state` anyway (this is a libbenchmark limitation in the version we use).

Collaborator:
Perhaps it's worth adding a comment that we need to call it even when validation failed.
+    {
+        engine->instantiate(*benchmark_case.wasm_binary);
+        func_ref = engine->find_function(benchmark_case.func_name, benchmark_case.func_sig);
+    }
+
+    // The loop body will not be executed if a validation error was reported with
+    // state.SkipWithError(). However, due to a bug in libbenchmark, the loop must always be
+    // reached.
     for ([[maybe_unused]] auto _ : state)
     {
         state.PauseTiming();
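To make the constraint from this thread concrete, here is a minimal, hypothetical Google Benchmark sketch (ours, not from the PR; the benchmark name and the ok flag are invented): after state.SkipWithError() the iteration loop must still be reached, even though its body will not run.

#include <benchmark/benchmark.h>

// Hypothetical benchmark illustrating the SkipWithError pattern.
static void bench_with_prevalidation(benchmark::State& state)
{
    const bool ok = false;  // stand-in for a failed pre-validation

    if (!ok)
        state.SkipWithError("validation failed");  // note: no early return

    // The loop body is skipped once an error has been reported, but the
    // loop itself must still be reached; returning before it would trip
    // the libbenchmark version used by this project.
    for ([[maybe_unused]] auto _ : state)
    {
        // measured work; never executed after SkipWithError()
    }
}
BENCHMARK(bench_with_prevalidation);
BENCHMARK_MAIN();

This is why benchmark_execute above guards its setup with if (ok) instead of returning early when validate_benchmark_case fails.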
5 changes: 5 additions & 0 deletions test/smoketests/CMakeLists.txt
@@ -0,0 +1,5 @@
# Fizzy: A fast WebAssembly interpreter
# Copyright 2020 The Fizzy Authors.
# SPDX-License-Identifier: Apache-2.0

add_subdirectory(benchmarks)
38 changes: 38 additions & 0 deletions test/smoketests/benchmarks/CMakeLists.txt
@@ -0,0 +1,38 @@
# Fizzy: A fast WebAssembly interpreter
# Copyright 2020 The Fizzy Authors.
# SPDX-License-Identifier: Apache-2.0

add_test(
    NAME fizzy/smoketests/bench/benchmarks
    COMMAND fizzy-bench ${CMAKE_CURRENT_LIST_DIR} --benchmark_min_time=0.01
)

add_test(
    NAME fizzy/smoketests/bench/cli-missing-dir-arg
    COMMAND fizzy-bench
)
set_tests_properties(
    fizzy/smoketests/bench/cli-missing-dir-arg
    PROPERTIES
    PASS_REGULAR_EXPRESSION "Missing DIR argument"
)

add_test(
    NAME fizzy/smoketests/bench/cli-invalid-arg
    COMMAND fizzy-bench ${CMAKE_CURRENT_LIST_DIR} --nonexisting_argument
)
set_tests_properties(
    fizzy/smoketests/bench/cli-invalid-arg
    PROPERTIES
    PASS_REGULAR_EXPRESSION "error: unrecognized command-line flag: --nonexisting_argument"
)

# Dump coverage data to distinct files (otherwise the file would be overwritten).
set_tests_properties(
    fizzy/smoketests/bench/benchmarks
    fizzy/smoketests/bench/cli-missing-dir-arg
    fizzy/smoketests/bench/cli-invalid-arg
    PROPERTIES
    ENVIRONMENT LLVM_PROFILE_FILE=${CMAKE_BINARY_DIR}/bench-%p.profraw
)
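One detail worth noting in the ENVIRONMENT line above: LLVM substitutes %p in LLVM_PROFILE_FILE with the process ID, so each of the three test invocations writes its own bench-<pid>.profraw file rather than overwriting a shared one.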
50 changes: 50 additions & 0 deletions test/smoketests/benchmarks/arith.inputs
@@ -0,0 +1,50 @@
addition
add
ii:i
1 2

3

division_by_zero
div
ii:i
1 0

0

memory_initialization_failure
add
ii:i
0 0
fe
0

unexcepted_result
div
ii:i
1 1



expected_memory_shorter
add
ii:i
0 0

0
00

missing_function
sub
ii:i
0 0

0

incorrect_result_value
add
ii:i
2 2

5
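
The .inputs format is not documented in this diff, but it can be inferred from the cases above. A minimal annotated record, based on the first case (the arrow comments are ours, not part of the format):

addition        <- benchmark case name
add             <- exported function to invoke
ii:i            <- signature: two i32 parameters, one i32 result
1 2             <- space-separated arguments
                <- initial memory as hex bytes (empty: none)
3               <- expected result
                <- expected memory as hex bytes (empty: none)

A blank line terminates each case. Judging by their names, the cases map onto the skip paths of validate_benchmark_case: division_by_zero triggers "Trapped", memory_initialization_failure hits the init_memory failure (arith.wat declares no memory), missing_function hits the function-not-found path, and so on.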

Binary file added: test/smoketests/benchmarks/arith.wasm (not shown)
12 changes: 12 additions & 0 deletions test/smoketests/benchmarks/arith.wat
@@ -0,0 +1,12 @@
(module
  (func (export "add") (param i32 i32) (result i32)
    local.get 0
    local.get 1
    i32.add
  )
  (func (export "div") (param i32 i32) (result i32)
    local.get 0
    local.get 1
    i32.div_u
  )
)
7 changes: 7 additions & 0 deletions test/smoketests/benchmarks/invalid.inputs
@@ -0,0 +1,7 @@
add
add
ii:i
1 2



Binary file added: test/smoketests/benchmarks/invalid.wasm (not shown)
6 changes: 6 additions & 0 deletions test/smoketests/benchmarks/invalid.wat
@@ -0,0 +1,6 @@
(module
  (func (export "add") (param i32 i32) (result i32)
    local.get 0
    i32.add
  )
)
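Our reading, not stated in the PR: this module is deliberately invalid, since its add body pushes a single operand (local.get 0) while i32.add pops two, so it should fail Fizzy's validation; the invalid.inputs case above then exercises the failing-module path of the benchmark pre-validation rather than a successful run.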
7 changes: 7 additions & 0 deletions test/smoketests/benchmarks/malformed/malformed.inputs
@@ -0,0 +1,7 @@
fake
fake_func
:




Binary file added: test/smoketests/benchmarks/malformed/malformed.wasm (not shown)
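The malformed.wasm binary itself is not shown; judging by the directory name and the placeholder fake / fake_func inputs above, it is presumably a file that fails parsing outright, covering the "Parsing failed" branch of benchmark_parse.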
24 changes: 24 additions & 0 deletions test/smoketests/benchmarks/memory.inputs
@@ -0,0 +1,24 @@
increase_value
inc
i:
0
ff000000

00010000


wrong_expected_memory
inc
i:
0
ff000000

ffffffff

missing_result_value
inc
i:
0

0

Binary file added: test/smoketests/benchmarks/memory.wasm (not shown)
11 changes: 11 additions & 0 deletions test/smoketests/benchmarks/memory.wat
@@ -0,0 +1,11 @@
(module
  (memory 1 1)
  (func (export "inc") (param i32)
    local.get 0
    local.get 0
    i32.load
    i32.const 1
    i32.add
    i32.store
  )
)
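A quick check of the increase_value case in memory.inputs above: inc loads the i32 at address 0, and WebAssembly memories are little-endian, so the initial bytes ff000000 decode to 255; adding 1 yields 256 (0x100), which is stored back as the bytes 00010000, exactly the expected memory.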
7 changes: 7 additions & 0 deletions test/smoketests/benchmarks/missing_imported_function.inputs
@@ -0,0 +1,7 @@
main
main
i:i
0

0

Binary file added: test/smoketests/benchmarks/missing_imported_function.wasm (not shown)
7 changes: 7 additions & 0 deletions test/smoketests/benchmarks/missing_imported_function.wat
@@ -0,0 +1,7 @@
(module
  (func $imported (import "env" "imported") (param i32) (result i32))
  (func (export "main") (param i32) (result i32)
    local.get 0
    call $imported
  )
)
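Our reading: fizzy-bench presumably provides no env.imported host function, so instantiating this module should fail, exercising the "Instantiation failed" skip path of validate_benchmark_case.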
1 change: 0 additions & 1 deletion test/utils/fizzy_engine.cpp

@@ -135,7 +135,6 @@ WasmEngine::Result FizzyEngine::execute(
 {
     const auto [trapped, result_stack] =
         fizzy::execute(*m_instance, static_cast<uint32_t>(func_ref), args);
-    assert(result_stack.size() <= 1);
Member Author:
This just shows fizzy's "public api" is broken right now :)

Hopefully, after the span changes go in, we can merge a version of #219 to fix this.

Collaborator:
I was confused at first, but indeed we dump the whole stack also in the case of returning trapped. I can add a disabled test for such a case, but I don't think it really helps.
     return {trapped, !result_stack.empty() ? result_stack.back() : std::optional<uint64_t>{}};
 }
 } // namespace fizzy::test
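To illustrate the review discussion above (a hypothetical example of ours, not from the PR): a function that traps mid-execution can leave several operands behind, e.g. a body of i32.const 1; i32.const 2; unreachable traps with two values on the stack. Since, as the reviewer notes, the whole stack is dumped even for trapped executions, result_stack.size() can exceed 1, so the removed assert was too strict; the wrapper only ever reads result_stack.back() anyway.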