Benchmarking fixes for P4Testgen.
fruffy committed Oct 23, 2023
1 parent 0aa2797 commit 9f08be0
Showing 5 changed files with 65 additions and 74 deletions.
67 changes: 36 additions & 31 deletions backends/p4tools/modules/testgen/benchmarks/plots.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3

import argparse
import os
import re
import sys
from operator import attrgetter
@@ -83,6 +84,8 @@ def get_strategy_data(input_dir):
folders = input_dir.glob("*/")
program_data = {}
for folder in folders:
if not os.path.isdir(folder):
continue
folder = Path(folder)
program_name = folder.stem
# Make sure we escape hyphens
@@ -105,38 +108,40 @@ def get_strategy_data(input_dir):


def plot_strategies(args, extra_args):
program_data = get_strategy_data(args.input_dir)
program_data = program_data["tna_simple_switch"]
pruned_data = []
name_map = {
"depth_first": "DFS",
"random_backtrack": "Random",
"greedy_statement_search": "Coverage-Optimized",
}
for strategy, candidate_data in program_data.items():
candidate_data = candidate_data.drop("Seed", axis=1)
candidate_data["Strategy"] = name_map[strategy]
candidate_data.Time = pd.to_numeric(candidate_data.Time)
candidate_data = candidate_data.sort_values(by=["Time"])
bins = np.arange(
0,
candidate_data.Time.max() + candidate_data.Time.max() / 1000,
candidate_data.Time.max() / 1000,
program_datas = get_strategy_data(args.input_dir)
for program_name, program_data in program_datas.items():
pruned_data = []
name_map = {
"depth_first": "DFS",
"random_backtrack": "Random",
"greedy_statement_search": "Coverage-Optimized",
}
for strategy, candidate_data in program_data.items():
candidate_data = candidate_data.drop("Seed", axis=1)
candidate_data["Strategy"] = name_map[strategy]
candidate_data.Time = pd.to_numeric(candidate_data.Time)
candidate_data = candidate_data.sort_values(by=["Time"])
bins = np.arange(
0,
candidate_data.Time.max() + candidate_data.Time.max() / 1000,
candidate_data.Time.max() / 1000,
)
candidate_data["Minutes"] = pd.cut(
candidate_data.Time, bins.astype(np.int64), include_lowest=True
).map(attrgetter("right"))
candidate_data.Minutes = pd.to_timedelta(candidate_data.Minutes, unit="nanoseconds")
candidate_data.Minutes = candidate_data.Minutes / pd.Timedelta(minutes=1)
candidate_data = candidate_data[candidate_data.Minutes <= 60]
pruned_data.append(candidate_data)
concat_data = pd.concat(pruned_data)
ax = sns.lineplot(
x="Minutes", y="Coverage", hue="Strategy", data=concat_data, errorbar=None
)
candidate_data["Minutes"] = pd.cut(
candidate_data.Time, bins.astype(np.int64), include_lowest=True
).map(attrgetter("right"))
candidate_data.Minutes = pd.to_timedelta(candidate_data.Minutes, unit="nanoseconds")
candidate_data.Minutes = candidate_data.Minutes / pd.Timedelta(minutes=1)
candidate_data = candidate_data[candidate_data.Minutes <= 60]
pruned_data.append(candidate_data)
concat_data = pd.concat(pruned_data)
ax = sns.lineplot(x="Minutes", y="Coverage", hue="Strategy", data=concat_data, errorbar=None)
sns.move_legend(ax, "lower center", bbox_to_anchor=(0.5, 0.98), ncol=3, title=None)
outdir = Path(args.out_dir).joinpath("strategy_coverage")
plt.savefig(outdir.with_suffix(".png"), bbox_inches="tight")
plt.savefig(outdir.with_suffix(".pdf"), bbox_inches="tight")
plt.gcf().clear()
sns.move_legend(ax, "lower center", bbox_to_anchor=(0.5, 0.98), ncol=3, title=None)
outdir = Path(args.out_dir).joinpath(f"{program_name}_strategy_coverage")
plt.savefig(outdir.with_suffix(".png"), bbox_inches="tight")
plt.savefig(outdir.with_suffix(".pdf"), bbox_inches="tight")
plt.gcf().clear()


def main(args, extra_args):
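
The reworked plot_strategies now emits one coverage-over-time plot per benchmarked program instead of only "tna_simple_switch", and it buckets the raw nanosecond timestamps into roughly a thousand bins before converting them to minutes and cutting the data off at one hour. A minimal sketch of that binning step on made-up data (the timestamps and coverage values below are hypothetical, not actual benchmark output):

import numpy as np
import pandas as pd
from operator import attrgetter

# Hypothetical input: per-test timestamps in nanoseconds and node coverage.
rng = np.random.default_rng(0)
times_ns = np.sort(rng.integers(1, 90 * 60 * 1_000_000_000, size=200))
data = pd.DataFrame({"Time": times_ns, "Coverage": np.linspace(0.1, 0.95, 200)})

# Split the time axis into ~1000 buckets, label each row with the right edge
# of its bucket, convert that edge to minutes, and keep only the first hour.
bins = np.arange(0, data.Time.max() + data.Time.max() / 1000, data.Time.max() / 1000)
data["Minutes"] = pd.cut(data.Time, bins.astype(np.int64), include_lowest=True).map(
    attrgetter("right")
)
data.Minutes = pd.to_timedelta(data.Minutes, unit="nanoseconds") / pd.Timedelta(minutes=1)
data = data[data.Minutes <= 60]
print(data.head())
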
31 changes: 21 additions & 10 deletions backends/p4tools/modules/testgen/benchmarks/test_coverage.py
@@ -247,15 +247,26 @@ def run_strategies_for_max_tests(options, test_args):
)

perf_file = test_args.test_dir.joinpath(test_args.p4_program.stem + "_perf").with_suffix(".csv")
perf = pd.read_csv(perf_file, index_col=0)
summarized_data = [
float(final_cov) * 100,
num_tests,
time_needed,
perf["Percentage"]["z3"],
perf["Percentage"]["step"],
perf["Percentage"]["backend"],
]
if perf_file.exists():
perf = pd.read_csv(perf_file, index_col=0)
summarized_data = [
float(final_cov) * 100,
num_tests,
time_needed,
perf["Percentage"]["z3"],
perf["Percentage"]["step"],
perf["Percentage"]["backend"],
]
else:
# In some cases, we do not have performance data. Nullify it.
summarized_data = [
float(final_cov) * 100,
num_tests,
time_needed,
None,
None,
None,
]
return summarized_data, nodes_cov, timestamps


@@ -313,7 +324,7 @@ def main(args, extra_args):
if options.test_mode == "DPDK":
options.target = "dpdk"
options.arch = "pna"
options.test_backend = "METADATA"
options.test_backend = "PTF"

# 7189 is an example of a good seed, which gets cov 1 with less than 100 tests
# in random access stack.
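
The guard in test_coverage.py handles runs for which no *_perf.csv was produced: the solver, stepping, and backend percentages are reported as None instead of the read failing on a missing file. Because pandas stores None as NaN, downstream aggregation simply skips those entries; a small sketch with hypothetical column names and numbers:

import pandas as pd

# Hypothetical rows in the shape returned by run_strategies_for_max_tests:
# [coverage %, number of tests, time needed, z3 %, step %, backend %].
rows = [
    [97.5, 84, 123.4, 40.1, 35.2, 24.7],
    [91.0, 60, 101.9, None, None, None],  # a run without a perf CSV
]
frame = pd.DataFrame(rows, columns=["Coverage", "Tests", "Time", "z3", "step", "backend"])
# None becomes NaN, so means and medians skip the missing perf numbers.
print(frame.mean())
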
@@ -83,7 +83,7 @@ std::string Metadata::getTestCaseTemplate() {
# Seed used to generate this test.
seed: {{ default(seed, "none") }}
# Test timestamp.
data: {{timestamp}}
date: {{timestamp}}
# Percentage of nodes covered at the time of this test.
node_coverage: {{coverage}}
37 changes: 5 additions & 32 deletions backends/p4tools/modules/testgen/targets/pna/backend/ptf/ptf.cpp
@@ -296,12 +296,13 @@ std::string PTF::getTestCaseTemplate() {
static std::string TEST_CASE(
R"""(
class Test{{test_id}}(AbstractTest):
# Date generated: {{timestamp}}
'''
Date generated: {{timestamp}}
Current node coverage: {{coverage}}
## if length(selected_branches) > 0
# {{selected_branches}}
Selected branches: {{selected_branches}}
## endif
'''
# Current statement coverage: {{coverage}}
Trace:
## for trace_item in trace
{{trace_item}}
##endfor
@@ -343,11 +344,6 @@ class Test{{test_id}}(AbstractTest):
)
## endfor
## endfor
## endif
## if exists("clone_specs")
## for clone_pkt in clone_specs.clone_pkts
self.insert_pre_clone_session({{clone_pkt.session_id}}, [{{clone_pkt.clone_port}}])
## endfor
## endif
@@ -367,20 +363,9 @@ class Test{{test_id}}(AbstractTest):
## for ignore_mask in verify.ignore_masks
exp_pkt.set_do_not_care({{ignore_mask.0}}, {{ignore_mask.1}})
## endfor
## if exists("clone_specs")
## for clone_pkt in clone_specs.clone_pkts
## if clone_pkt.cloned
ptfutils.verify_packet(self, exp_pkt, {{clone_pkt.clone_port}})
##endif
##endfor
## if not clone_specs.has_clone
ptfutils.verify_packet(self, exp_pkt, eg_port)
##endif
## else
ptfutils.verify_packet(self, exp_pkt, eg_port)
bt.testutils.log.info("Verifying no other packets ...")
ptfutils.verify_no_other_packets(self, self.device_id, timeout=self.packet_wait_time)
## endif
## else
ptfutils.verify_no_other_packets(self, self.device_id, timeout=self.packet_wait_time)
## endif
@@ -409,18 +394,6 @@ void PTF::emitTestcase(const TestSpec *testSpec, cstring selectedBranches, size_
coverageStr << std::setprecision(2) << currentCoverage;
dataJson["coverage"] = coverageStr.str();

// The following few lines are commented out temporarily; they are copied from BMv2
// Check whether this test has a clone configuration.
// These are special because they require additional instrumentation and produce two output
// packets.
// auto cloneSpecs = testSpec->getTestObjectCategory("clone_specs");

// if (!cloneSpecs.empty()) {
// dataJson["clone_specs"] = getClone(cloneSpecs);
// }
// auto meterValues = testSpec->getTestObjectCategory("meter_values");
// dataJson["meter_values"] = getMeter(meterValues);

LOG5("PTF backend: emitting testcase:" << std::setw(4) << dataJson);

inja::render_to(ptfFileStream, testCase, dataJson);
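
The template change above moves the generation date, the node coverage, and (when branches were selected) the selected branch list out of '#' comments and into the test class docstring, and it drops the clone-session handling that had been carried over from BMv2. As a rough illustration, a rendered test case header could look like the sketch below; the class name, the values, and the stubbed AbstractTest base are all placeholders:

# Hypothetical rendering of the updated template for one test case.
class AbstractTest:  # stand-in for the real PTF base class
    pass


class Test1(AbstractTest):
    '''
    Date generated: 2023-10-23-12:00
    Current node coverage: 0.87
    Selected branches: 1,4,2
    '''


print(Test1.__doc__)
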
2 changes: 2 additions & 0 deletions tools/testutils.py
@@ -246,6 +246,8 @@ def exec_process(args: str, **extra_args) -> ProcessResult:
except subprocess.TimeoutExpired as exception:
if errpipe:
out = errpipe.out
else:
out = str(exception.stderr)
returncode = FAILURE
cmd = exception.cmd
# Rejoin the list for better readability.
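
The exec_process fix covers a timeout that fires before any error pipe has been set up: in that case stderr is read from the TimeoutExpired exception itself. A minimal sketch of the fallback, assuming a Unix-like environment with a sleep binary:

import subprocess

try:
    subprocess.run(["sleep", "5"], capture_output=True, timeout=0.1, check=True)
    out = ""
except subprocess.TimeoutExpired as exception:
    # No separate error pipe here, so fall back to the exception's stderr.
    out = str(exception.stderr)  # "None" if nothing was captured
    print("Timed out:", exception.cmd, "stderr:", out)
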
