diff --git a/CHANGES.next.md b/CHANGES.next.md
index e31187b794..1715d3a8ad 100644
--- a/CHANGES.next.md
+++ b/CHANGES.next.md
@@ -159,6 +159,8 @@
 - Added AWS DAX provider.
 - Added Google Cloud Firestore ycsb benchmarks.
 - Added support for un-managed data processing yarn cluster benchmarking.
+- Added support for UDP_STREAM tests to netperf_benchmark.
+- Added flags to select the send size for stream tests in netperf_benchmark.
 - Added placement group flag support for Azure. "cluster" will create
   proximity placement group. "spread" will create an availability set.
 - Added a tcpdump collector with --tcpdump flag.
diff --git a/perfkitbenchmarker/linux_benchmarks/container_netperf_benchmark.py b/perfkitbenchmarker/linux_benchmarks/container_netperf_benchmark.py
index c61a3a1a46..6ab75dda78 100644
--- a/perfkitbenchmarker/linux_benchmarks/container_netperf_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/container_netperf_benchmark.py
@@ -27,6 +27,11 @@
 
 FLAGS = flags.FLAGS
 
+# We set the default to 128KB (131072 bytes) to override the Linux default
+# of 16K so that we can achieve the "link rate".
+flags.DEFINE_integer('container_netperf_tcp_stream_send_size_in_bytes', 131072,
+                     'Send size to use for TCP_STREAM tests (netperf -m flag)')
+
 BENCHMARK_NAME = 'container_netperf'
 BENCHMARK_CONFIG = """
 container_netperf:
@@ -87,6 +92,7 @@ def Run(benchmark_spec):
       '-H', container_0.ip_address,
       '-l', '100',
       '--',
+      '-m', FLAGS.container_netperf_tcp_stream_send_size_in_bytes,
       '-o', netperf_benchmark.OUTPUT_SELECTOR]
   cluster.DeployContainer('netperf', benchmark_spec.container_specs['netperf'])
   container_1 = cluster.containers['netperf'][1]
diff --git a/perfkitbenchmarker/linux_benchmarks/netperf_benchmark.py b/perfkitbenchmarker/linux_benchmarks/netperf_benchmark.py
index 960460b6f7..03e6a74465 100644
--- a/perfkitbenchmarker/linux_benchmarks/netperf_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/netperf_benchmark.py
@@ -65,8 +65,15 @@
 flags.DEFINE_integer('netperf_thinktime_run_length', 0,
                      'The number of contiguous numbers to sum at a time in the '
                      'thinktime array.')
-
-ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR']
+flags.DEFINE_integer('netperf_udp_stream_send_size_in_bytes', 1024,
+                     'Send size to use for UDP_STREAM tests (netperf -m flag)',
+                     lower_bound=1, upper_bound=65507)
+# We set the default to 128KB (131072 bytes) to override the Linux default
+# of 16K so that we can achieve the "link rate".
+flags.DEFINE_integer('netperf_tcp_stream_send_size_in_bytes', 131072,
+                     'Send size to use for TCP_STREAM tests (netperf -m flag)')
+
+ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR', 'UDP_STREAM']
 flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
                   'The netperf benchmark(s) to run.')
 flags.register_validator(
@@ -78,7 +85,7 @@
 BENCHMARK_NAME = 'netperf'
 BENCHMARK_CONFIG = """
 netperf:
-  description: Run TCP_RR, TCP_CRR, UDP_RR and TCP_STREAM
+  description: Run TCP_RR, TCP_CRR, UDP_RR, TCP_STREAM and UDP_STREAM
   vm_groups:
     vm_1:
       vm_spec: *default_single_core
@@ -355,8 +362,8 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
   """
   enable_latency_histograms = FLAGS.netperf_enable_histograms or num_streams > 1
   # Throughput benchmarks don't have latency histograms
-  enable_latency_histograms = enable_latency_histograms and \
-      benchmark_name != 'TCP_STREAM'
+  enable_latency_histograms = (enable_latency_histograms and
+                               (benchmark_name not in ['TCP_STREAM', 'UDP_STREAM']))
   # Flags:
   # -o specifies keys to include in CSV output.
   # -j keeps additional latency numbers
@@ -367,6 +374,14 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
   confidence = ('-I 99,5 -i {0},3'.format(FLAGS.netperf_max_iter)
                 if FLAGS.netperf_max_iter else '')
   verbosity = '-v2 ' if enable_latency_histograms else ''
+
+  remote_cmd_timeout = (
+      FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300)
+
+  metadata = {'netperf_test_length': FLAGS.netperf_test_length,
+              'sending_thread_count': num_streams,
+              'max_iter': FLAGS.netperf_max_iter or 1}
+
   netperf_cmd = ('{netperf_path} -p {{command_port}} -j {verbosity} '
                  '-t {benchmark_name} -H {server_ip} -l {length} {confidence}'
                  ' -- '
@@ -377,7 +392,19 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
                      server_ip=server_ip,
                      length=FLAGS.netperf_test_length,
                      output_selector=OUTPUT_SELECTOR,
-                     confidence=confidence, verbosity=verbosity)
+                     confidence=confidence,
+                     verbosity=verbosity)
+
+  if benchmark_name.upper() == 'UDP_STREAM':
+    netperf_cmd += (' -R 1 -m {send_size} -M {send_size} '.format(
+        send_size=FLAGS.netperf_udp_stream_send_size_in_bytes))
+    metadata['netperf_send_size_in_bytes'] = FLAGS.netperf_udp_stream_send_size_in_bytes
+
+  elif benchmark_name.upper() == 'TCP_STREAM':
+    netperf_cmd += (' -m {send_size} -M {send_size} '.format(
+        send_size=FLAGS.netperf_tcp_stream_send_size_in_bytes))
+    metadata['netperf_send_size_in_bytes'] = FLAGS.netperf_tcp_stream_send_size_in_bytes
+
   if FLAGS.netperf_thinktime != 0:
     netperf_cmd += (' -X {thinktime},{thinktime_array_size},'
                     '{thinktime_run_length} ').format(
@@ -402,11 +429,6 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
   json_out = json.loads(remote_stdout)
   stdouts = json_out[0]
 
-  # Metadata to attach to samples
-  metadata = {'netperf_test_length': FLAGS.netperf_test_length,
-              'max_iter': FLAGS.netperf_max_iter or 1,
-              'sending_thread_count': num_streams}
-
   parsed_output = [ParseNetperfOutput(stdout, metadata, benchmark_name,
                                       enable_latency_histograms)
                    for stdout in stdouts]
diff --git a/tests/data/netperf_results.json b/tests/data/netperf_results.json
index 8e5d302605..807735f70e 100644
--- a/tests/data/netperf_results.json
+++ b/tests/data/netperf_results.json
@@ -38,5 +38,15 @@
     "MIGRATED UDP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001 AF_INET to 10.240.31.117 () port 20001 AF_INET : +/-2.500% @ 99% conf.  : first burst 0",
     "Throughput,Throughput Units,Throughput Confidence Width (%),Confidence Iterations Run,Stddev Latency Microseconds,50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,99th Percentile Latency Microseconds,Minimum Latency Microseconds,Maximum Latency Microseconds,Local Transport Retransmissions,Remote Transport Retransmissions",
     "3313.49,Trans/s,7.546,20,214.64,295,330,406,200,500,0,0"
+  ],
+  [
+    "MIGRATED UDP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 104.198.67.251 () port 20001 AF_INET : +/-2.500% @ 99% conf.",
+    "Throughput,Throughput Units,50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,99th Percentile Latency Microseconds,Stddev Latency Microseconds,Minimum Latency Microseconds,Maximum Latency Microseconds,Confidence Iterations Run,Throughput Confidence Width (%),Local Transport Retransmissions,Remote Transport Retransmissions",
+    "1102.42,10^6bits/s,3,3,11,46.14,1,15144,1,-1.000,-1,-1"
+  ],
+  [
+    "MIGRATED UDP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 104.198.67.251 () port 20001 AF_INET : +/-2.500% @ 99% conf.",
+    "Throughput,Throughput Units,50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,99th Percentile Latency Microseconds,Stddev Latency Microseconds,Minimum Latency Microseconds,Maximum Latency Microseconds,Confidence Iterations Run,Throughput Confidence Width (%),Local Transport Retransmissions,Remote Transport Retransmissions",
+    "1802.72,10^6bits/s,3,3,11,46.14,1,15144,1,-1.000,-1,-1"
   ]
 ]
diff --git a/tests/linux_benchmarks/netperf_benchmark_test.py b/tests/linux_benchmarks/netperf_benchmark_test.py
index 872e39aa4b..046c0ce0d4 100644
--- a/tests/linux_benchmarks/netperf_benchmark_test.py
+++ b/tests/linux_benchmarks/netperf_benchmark_test.py
@@ -129,7 +129,10 @@ def testExternalAndInternal(self):
          ('UDP_RR_Latency_p99', 406.0, 'us'),
          ('UDP_RR_Latency_min', 200.0, 'us'),
          ('UDP_RR_Latency_max', 500.0, 'us'),
-         ('UDP_RR_Latency_stddev', 214.64, 'us')],
+         ('UDP_RR_Latency_stddev', 214.64, 'us'),
+         ('UDP_STREAM_Throughput', 1102.42, mbps),
+         ('UDP_STREAM_Throughput', 1802.72, mbps),
+        ],
         [i[:3] for i in result])
 
     external_meta = {'ip_type': 'external'}