diff --git a/pyproject.toml b/pyproject.toml index 9177ba5..8d6bf40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,7 @@ requires = ["poetry_core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] +redis-benchmarks-compare = "redis_benchmarks_specification.__compare__.compare:main" redis-benchmarks-spec-api = "redis_benchmarks_specification.__api__.api:main" redis-benchmarks-spec-builder = "redis_benchmarks_specification.__builder__.builder:main" redis-benchmarks-spec-client-runner = "redis_benchmarks_specification.__runner__.runner:main" diff --git a/redis_benchmarks_specification/__cli__/cli.py b/redis_benchmarks_specification/__cli__/cli.py index bf32502..9c01b6f 100644 --- a/redis_benchmarks_specification/__cli__/cli.py +++ b/redis_benchmarks_specification/__cli__/cli.py @@ -231,7 +231,7 @@ def trigger_tests_cli_command_logic(args, project_name, project_version): ) filtered_hash_commits.append(cdict) - if True: # args.dry_run is False: + if args.dry_run is False: conn = redis.StrictRedis( host=args.redis_host, port=args.redis_port, diff --git a/redis_benchmarks_specification/__compare__/__init__.py b/redis_benchmarks_specification/__compare__/__init__.py new file mode 100644 index 0000000..4a70c9e --- /dev/null +++ b/redis_benchmarks_specification/__compare__/__init__.py @@ -0,0 +1,5 @@ +# Apache License Version 2.0 +# +# Copyright (c) 2021., Redis Labs Modules +# All rights reserved. +# diff --git a/redis_benchmarks_specification/__compare__/args.py b/redis_benchmarks_specification/__compare__/args.py new file mode 100644 index 0000000..7349918 --- /dev/null +++ b/redis_benchmarks_specification/__compare__/args.py @@ -0,0 +1,135 @@ +# Apache License Version 2.0 +# +# Copyright (c) 2021., Redis Labs Modules +# All rights reserved. 
+# + +# environment variables +import datetime +import os + + +def get_start_time_vars(start_time=None): + if start_time is None: + start_time = datetime.datetime.utcnow() + start_time_ms = int( + (start_time - datetime.datetime(1970, 1, 1)).total_seconds() * 1000 + ) + start_time_str = start_time.strftime("%Y-%m-%d-%H-%M-%S") + return start_time, start_time_ms, start_time_str + + +PERFORMANCE_GH_TOKEN = os.getenv("PERFORMANCE_GH_TOKEN", None) +PERFORMANCE_RTS_PUSH = bool(int(os.getenv("PUSH_RTS", "0"))) + + +_, NOW_UTC, _ = get_start_time_vars() +LAST_MONTH_UTC = NOW_UTC - (31 * 24 * 60 * 60 * 1000) +START_TIME_NOW_UTC, _, _ = get_start_time_vars() +START_TIME_LAST_SIX_MONTHS_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=180) + + +def create_compare_arguments(parser): + parser.add_argument( + "--test", + type=str, + default="", + help="specify a test (or a comma separated list of tests) to use for comparison. If none is specified by default will use all of them.", + ) + parser.add_argument( + "--defaults_filename", + type=str, + default="defaults.yml", + help="specify the defaults file containing spec topologies, common metric extractions,etc...", + ) + parser.add_argument("--github_repo", type=str, default="redis") + parser.add_argument("--github_org", type=str, default="redis") + parser.add_argument("--triggering_env", type=str, default="ci") + parser.add_argument("--github_token", type=str, default=PERFORMANCE_GH_TOKEN) + parser.add_argument("--pull-request", type=str, default=None, nargs="?", const="") + parser.add_argument("--deployment_name", type=str, default="oss-standalone") + parser.add_argument("--deployment_type", type=str, default="oss-standalone") + parser.add_argument("--baseline_deployment_name", type=str, default="") + parser.add_argument("--comparison_deployment_name", type=str, default="") + parser.add_argument("--metric_name", type=str, default="ALL_STATS.Totals.Ops/sec") + parser.add_argument( + "--running_platform", type=str, 
default="intel64-ubuntu22.04-redis-icx1" + ) + parser.add_argument("--extra-filter", type=str, default=None) + parser.add_argument( + "--last_n", + type=int, + default=-1, + help="Use the last N samples for each time-serie. by default will use all available values", + ) + parser.add_argument( + "--last_n_baseline", + type=int, + default=7, + help="Use the last N samples for each time-serie. by default will use last 7 available values", + ) + parser.add_argument( + "--last_n_comparison", + type=int, + default=1, + help="Use the last N samples for each time-serie. by default will use last value only", + ) + parser.add_argument( + "--from-date", + type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d"), + default=START_TIME_LAST_SIX_MONTHS_UTC, + ) + parser.add_argument( + "--to-date", + type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d"), + default=START_TIME_NOW_UTC, + ) + parser.add_argument( + "--metric_mode", + type=str, + default="higher-better", + help="either 'lower-better' or 'higher-better'", + ) + parser.add_argument("--baseline-branch", type=str, default=None, required=False) + parser.add_argument("--baseline-tag", type=str, default=None, required=False) + parser.add_argument("--comparison-branch", type=str, default=None, required=False) + parser.add_argument("--comparison-tag", type=str, default=None, required=False) + parser.add_argument("--print-regressions-only", type=bool, default=False) + parser.add_argument("--print-improvements-only", type=bool, default=False) + parser.add_argument("--skip-unstable", type=bool, default=False) + parser.add_argument("--verbose", type=bool, default=False) + parser.add_argument("--simple-table", type=bool, default=False) + parser.add_argument("--use_metric_context_path", type=bool, default=False) + parser.add_argument("--testname_regex", type=str, default=".*", required=False) + parser.add_argument( + "--regressions-percent-lower-limit", + type=float, + default=5.0, + help="Only consider regressions with a 
percentage over the defined limit. (0-100)", + ) + parser.add_argument( + "--redistimeseries_host", type=str, default="benchmarks.redislabs.com" + ) + parser.add_argument("--redistimeseries_port", type=int, default=12011) + parser.add_argument("--redistimeseries_pass", type=str, default=None) + parser.add_argument("--redistimeseries_user", type=str, default=None) + parser.add_argument( + "--from_timestamp", + default=None, + help="The minimum period to use for the the value fetching", + ) + parser.add_argument("--to_timestamp", default=None) + + parser.add_argument( + "--grafana_base_dashboard", + type=str, + default="https://benchmarksrediscom.grafana.net/d/", + ) + parser.add_argument( + "--auto-approve", + required=False, + default=False, + action="store_true", + help="Skip interactive approval of changes to github before applying.", + ) + return parser diff --git a/redis_benchmarks_specification/__compare__/compare.py b/redis_benchmarks_specification/__compare__/compare.py new file mode 100644 index 0000000..e6c8712 --- /dev/null +++ b/redis_benchmarks_specification/__compare__/compare.py @@ -0,0 +1,1153 @@ +# BSD 3-Clause License +# +# Copyright (c) 2021., Redis Labs Modules +# All rights reserved. 
+# +import datetime +import logging +import re +import pandas as pd +import redis +import yaml +from pytablewriter import MarkdownTableWriter +import humanize +import datetime as dt +import os +from tqdm import tqdm +from github import Github +from slack_sdk.webhook import WebhookClient +import argparse +from redis_benchmarks_specification.__compare__.args import create_compare_arguments + + +from redis_benchmarks_specification.__common__.package import ( + get_version_string, + populate_with_poetry_data, +) + + +WH_TOKEN = os.getenv("PERFORMANCE_WH_TOKEN", None) + +LOG_LEVEL = logging.DEBUG +if os.getenv("VERBOSE", "0") == "0": + LOG_LEVEL = logging.INFO +LOG_FORMAT = "%(asctime)s %(levelname)-4s %(message)s" +LOG_DATEFMT = "%Y-%m-%d %H:%M:%S" + + +def get_overall_dashboard_keynames( + tf_github_org, + tf_github_repo, + tf_triggering_env, + build_variant_name=None, + running_platform=None, + test_name=None, +): + build_variant_str = "" + if build_variant_name is not None: + build_variant_str = "/{}".format(build_variant_name) + running_platform_str = "" + if running_platform is not None: + running_platform_str = "/{}".format(running_platform) + sprefix = ( + "ci.benchmarks.redislabs/" + + "{triggering_env}/{github_org}/{github_repo}".format( + triggering_env=tf_triggering_env, + github_org=tf_github_org, + github_repo=tf_github_repo, + ) + ) + testcases_setname = "{}:testcases".format(sprefix) + deployment_name_setname = "{}:deployment_names".format(sprefix) + project_archs_setname = "{}:archs".format(sprefix) + project_oss_setname = "{}:oss".format(sprefix) + project_branches_setname = "{}:branches".format(sprefix) + project_versions_setname = "{}:versions".format(sprefix) + project_compilers_setname = "{}:compilers".format(sprefix) + running_platforms_setname = "{}:platforms".format(sprefix) + build_variant_setname = "{}:build_variants".format(sprefix) + build_variant_prefix = "{sprefix}{build_variant_str}".format( + sprefix=sprefix, + 
build_variant_str=build_variant_str, + ) + prefix = "{build_variant_prefix}{running_platform_str}".format( + build_variant_prefix=build_variant_prefix, + running_platform_str=running_platform_str, + ) + tsname_project_total_success = "{}:total_success".format( + prefix, + ) + tsname_project_total_failures = "{}:total_failures".format( + prefix, + ) + testcases_metric_context_path_setname = "" + if test_name is not None: + testcases_metric_context_path_setname = ( + "{testcases_setname}:metric_context_path:{test_name}".format( + testcases_setname=testcases_setname, test_name=test_name + ) + ) + testcases_and_metric_context_path_setname = ( + "{testcases_setname}_AND_metric_context_path".format( + testcases_setname=testcases_setname + ) + ) + return ( + prefix, + testcases_setname, + deployment_name_setname, + tsname_project_total_failures, + tsname_project_total_success, + running_platforms_setname, + build_variant_setname, + testcases_metric_context_path_setname, + testcases_and_metric_context_path_setname, + project_archs_setname, + project_oss_setname, + project_branches_setname, + project_versions_setname, + project_compilers_setname, + ) + + +def get_start_time_vars(start_time=None): + if start_time is None: + start_time = dt.datetime.utcnow() + start_time_ms = int((start_time - dt.datetime(1970, 1, 1)).total_seconds() * 1000) + start_time_str = start_time.strftime("%Y-%m-%d-%H-%M-%S") + return start_time, start_time_ms, start_time_str + + +def get_project_compare_zsets(triggering_env, org, repo): + return "ci.benchmarks.redislabs/{}/{}/{}:compare:pull_requests:zset".format( + triggering_env, org, repo + ) + + +def compare_command_logic(args, project_name, project_version): + + logger = logging.getLogger() + logger.setLevel(LOG_LEVEL) + + # create console handler and set level to debug + ch = logging.StreamHandler() + ch.setLevel(LOG_LEVEL) + + # create formatter + formatter = logging.Formatter(LOG_FORMAT) + + # add formatter to ch + ch.setFormatter(formatter) 
+ + # add ch to logger + logger.addHandler(ch) + + logging.info( + "Using: {project_name} {project_version}".format( + project_name=project_name, project_version=project_version + ) + ) + logging.info( + "Checking connection to RedisTimeSeries with user: {}, host: {}, port: {}".format( + args.redistimeseries_user, + args.redistimeseries_host, + args.redistimeseries_port, + ) + ) + rts = redis.Redis( + host=args.redistimeseries_host, + port=args.redistimeseries_port, + password=args.redistimeseries_pass, + username=args.redistimeseries_user, + ) + rts.ping() + default_baseline_branch = None + default_metrics_str = "" + if args.defaults_filename != "" and os.path.exists(args.defaults_filename): + logging.info( + "Loading configuration from defaults file: {}".format( + args.defaults_filename + ) + ) + with open(args.defaults_filename) as yaml_fd: + defaults_dict = yaml.safe_load(yaml_fd) + if "exporter" in defaults_dict: + exporter_dict = defaults_dict["exporter"] + if "comparison" in exporter_dict: + comparison_dict = exporter_dict["comparison"] + if "metrics" in comparison_dict: + metrics = comparison_dict["metrics"] + logging.info("Detected defaults metrics info. reading metrics") + default_metrics = [] + + for metric in metrics: + if metric.startswith("$."): + metric = metric[2:] + logging.info("Will use metric: {}".format(metric)) + default_metrics.append(metric) + if len(default_metrics) == 1: + default_metrics_str = default_metrics[0] + if len(default_metrics) > 1: + default_metrics_str = "({})".format( + ",".join(default_metrics) + ) + logging.info("Default metrics: {}".format(default_metrics_str)) + + if "baseline-branch" in comparison_dict: + default_baseline_branch = comparison_dict["baseline-branch"] + logging.info( + "Detected baseline branch in defaults file. 
{}".format( + default_baseline_branch + ) + ) + + tf_github_org = args.github_org + tf_github_repo = args.github_repo + tf_triggering_env = args.triggering_env + if args.baseline_deployment_name != "": + baseline_deployment_name = args.baseline_deployment_name + else: + baseline_deployment_name = args.deployment_name + if args.comparison_deployment_name != "": + comparison_deployment_name = args.comparison_deployment_name + else: + comparison_deployment_name = args.deployment_name + + logging.info( + "Using baseline deployment_name={} and comparison deployment_name={} for the analysis".format( + baseline_deployment_name, + comparison_deployment_name, + ) + ) + from_ts_ms = args.from_timestamp + to_ts_ms = args.to_timestamp + from_date = args.from_date + to_date = args.to_date + baseline_branch = args.baseline_branch + if baseline_branch is None and default_baseline_branch is not None: + logging.info( + "Given --baseline-branch was null using the default baseline branch {}".format( + default_baseline_branch + ) + ) + baseline_branch = default_baseline_branch + comparison_branch = args.comparison_branch + simplify_table = args.simple_table + print_regressions_only = args.print_regressions_only + print_improvements_only = args.print_improvements_only + skip_unstable = args.skip_unstable + baseline_tag = args.baseline_tag + comparison_tag = args.comparison_tag + last_n_baseline = args.last_n + last_n_comparison = args.last_n + if last_n_baseline < 0: + last_n_baseline = args.last_n_baseline + if last_n_comparison < 0: + last_n_comparison = args.last_n_comparison + logging.info("Using last {} samples for baseline analysis".format(last_n_baseline)) + logging.info( + "Using last {} samples for comparison analysis".format(last_n_comparison) + ) + verbose = args.verbose + regressions_percent_lower_limit = args.regressions_percent_lower_limit + metric_name = args.metric_name + if (metric_name is None or metric_name == "") and default_metrics_str != "": + logging.info( + 
"Given --metric_name was null using the default metric names {}".format( + default_metrics_str + ) + ) + metric_name = default_metrics_str + + if metric_name is None: + logging.error( + "You need to provider either " + + " --metric_name or provide a defaults file via --defaults_filename that contains exporter.redistimeseries.comparison.metrics array. Exiting..." + ) + exit(1) + else: + logging.info("Using metric {}".format(metric_name)) + + metric_mode = args.metric_mode + test = args.test + use_metric_context_path = args.use_metric_context_path + github_token = args.github_token + pull_request = args.pull_request + testname_regex = args.testname_regex + auto_approve = args.auto_approve + running_platform = args.running_platform + grafana_base_dashboard = args.grafana_base_dashboard + # using an access token + is_actionable_pr = False + contains_regression_comment = False + regression_comment = None + github_pr = None + # slack related + webhook_notifications_active = False + webhook_client_slack = None + if running_platform is not None: + logging.info( + "Using platform named: {} to do the comparison.\n\n".format( + running_platform + ) + ) + + old_regression_comment_body = "" + if github_token is not None: + logging.info("Detected github token") + g = Github(github_token) + if pull_request is not None and pull_request != "": + pull_request_n = int(pull_request) + github_pr = ( + g.get_user(tf_github_org) + .get_repo(tf_github_repo) + .get_issue(pull_request_n) + ) + comments = github_pr.get_comments() + pr_link = github_pr.html_url + logging.info("Working on github PR already: {}".format(pr_link)) + is_actionable_pr = True + contains_regression_comment, pos = check_regression_comment(comments) + if contains_regression_comment: + regression_comment = comments[pos] + old_regression_comment_body = regression_comment.body + logging.info( + "Already contains regression comment. 
Link: {}".format( + regression_comment.html_url + ) + ) + if verbose: + logging.info("Printing old regression comment:") + print("".join(["-" for x in range(1, 80)])) + print(regression_comment.body) + print("".join(["-" for x in range(1, 80)])) + else: + logging.info("Does not contain regression comment") + + grafana_dashboards_uids = { + "redisgraph": "SH9_rQYGz", + "redisbloom": "q4-5sRR7k", + "redisearch": "3Ejv2wZnk", + "redisjson": "UErSC0jGk", + "redistimeseries": "2WMw61UGz", + } + uid = None + if tf_github_repo.lower() in grafana_dashboards_uids: + uid = grafana_dashboards_uids[tf_github_repo.lower()] + grafana_link_base = None + if uid is not None: + grafana_link_base = "{}/{}".format(grafana_base_dashboard, uid) + logging.info( + "There is a grafana dashboard for this repo. Base link: {}".format( + grafana_link_base + ) + ) + + ( + detected_regressions, + table_output, + total_improvements, + total_regressions, + total_stable, + total_unstable, + total_comparison_points, + ) = compute_regression_table( + rts, + tf_github_org, + tf_github_repo, + tf_triggering_env, + metric_name, + comparison_branch, + baseline_branch, + baseline_tag, + comparison_tag, + baseline_deployment_name, + comparison_deployment_name, + print_improvements_only, + print_regressions_only, + skip_unstable, + regressions_percent_lower_limit, + simplify_table, + test, + testname_regex, + verbose, + last_n_baseline, + last_n_comparison, + metric_mode, + from_date, + from_ts_ms, + to_date, + to_ts_ms, + use_metric_context_path, + running_platform, + ) + comment_body = "" + if total_comparison_points > 0: + comment_body = "### Automated performance analysis summary\n\n" + comment_body += "This comment was automatically generated given there is performance data available.\n\n" + if running_platform is not None: + comment_body += "Using platform named: {} to do the comparison.\n\n".format( + running_platform + ) + comparison_summary = "In summary:\n" + if total_stable > 0: + 
comparison_summary += ( + "- Detected a total of {} stable tests between versions.\n".format( + total_stable, + ) + ) + + if total_unstable > 0: + comparison_summary += ( + "- Detected a total of {} highly unstable benchmarks.\n".format( + total_unstable + ) + ) + if total_improvements > 0: + comparison_summary += "- Detected a total of {} improvements above the improvement water line.\n".format( + total_improvements + ) + if total_regressions > 0: + comparison_summary += "- Detected a total of {} regressions bellow the regression water line {}.\n".format( + total_regressions, args.regressions_percent_lower_limit + ) + + comment_body += comparison_summary + comment_body += "\n" + + if grafana_link_base is not None: + grafana_link = "{}/".format(grafana_link_base) + if baseline_tag is not None and comparison_tag is not None: + grafana_link += "?var-version={}&var-version={}".format( + baseline_tag, comparison_tag + ) + if baseline_branch is not None and comparison_branch is not None: + grafana_link += "?var-branch={}&var-branch={}".format( + baseline_branch, comparison_branch + ) + comment_body += "You can check a comparison in detail via the [grafana link]({})".format( + grafana_link + ) + + comment_body += "\n\n##" + table_output + print(comment_body) + + if is_actionable_pr: + zset_project_pull_request = get_project_compare_zsets( + tf_triggering_env, + tf_github_org, + tf_github_repo, + ) + logging.info( + "Populating the pull request performance ZSETs: {} with branch {}".format( + zset_project_pull_request, comparison_branch + ) + ) + _, start_time_ms, _ = get_start_time_vars() + res = rts.zadd( + zset_project_pull_request, + {comparison_branch: start_time_ms}, + ) + logging.info( + "Result of Populating the pull request performance ZSETs: {} with branch {}: {}".format( + zset_project_pull_request, comparison_branch, res + ) + ) + user_input = "n" + html_url = "n/a" + regression_count = len(detected_regressions) + ( + baseline_str, + by_str_baseline, + 
comparison_str, + by_str_comparison, + ) = get_by_strings( + baseline_branch, + comparison_branch, + baseline_tag, + comparison_tag, + ) + + if contains_regression_comment: + same_comment = False + if comment_body == old_regression_comment_body: + logging.info( + "The old regression comment is the same as the new comment. skipping..." + ) + same_comment = True + else: + logging.info( + "The old regression comment is different from the new comment. updating it..." + ) + comment_body_arr = comment_body.split("\n") + old_regression_comment_body_arr = old_regression_comment_body.split( + "\n" + ) + if verbose: + DF = [ + x + for x in comment_body_arr + if x not in old_regression_comment_body_arr + ] + print("---------------------") + print(DF) + print("---------------------") + if same_comment is False: + if auto_approve: + print("auto approving...") + else: + user_input = input( + "Do you wish to update the comment {} (y/n): ".format( + regression_comment.html_url + ) + ) + if user_input.lower() == "y" or auto_approve: + print("Updating comment {}".format(regression_comment.html_url)) + regression_comment.edit(comment_body) + html_url = regression_comment.html_url + print( + "Updated comment. Access it via {}".format( + regression_comment.html_url + ) + ) + + else: + if auto_approve: + print("auto approving...") + else: + user_input = input( + "Do you wish to add a comment in {} (y/n): ".format(pr_link) + ) + if user_input.lower() == "y" or auto_approve: + print("creating an comment in PR {}".format(pr_link)) + regression_comment = github_pr.create_comment(comment_body) + html_url = regression_comment.html_url + print("created comment. 
Access it via {}".format(html_url)) + + else: + logging.error("There was no comparison points to produce a table...") + return ( + detected_regressions, + comment_body, + total_improvements, + total_regressions, + total_stable, + total_unstable, + total_comparison_points, + ) + + +def check_regression_comment(comments): + res = False + pos = -1 + for n, comment in enumerate(comments): + body = comment.body + if "Comparison between" in body and "Time Period from" in body: + res = True + pos = n + return res, pos + + +def compute_regression_table( + rts, + tf_github_org, + tf_github_repo, + tf_triggering_env, + metric_name, + comparison_branch, + baseline_branch="master", + baseline_tag=None, + comparison_tag=None, + baseline_deployment_name="oss-standalone", + comparison_deployment_name="oss-standalone", + print_improvements_only=False, + print_regressions_only=False, + skip_unstable=False, + regressions_percent_lower_limit=5.0, + simplify_table=False, + test="", + testname_regex=".*", + verbose=False, + last_n_baseline=-1, + last_n_comparison=-1, + metric_mode="higher-better", + from_date=None, + from_ts_ms=None, + to_date=None, + to_ts_ms=None, + use_metric_context_path=None, + running_platform=None, +): + START_TIME_NOW_UTC, _, _ = get_start_time_vars() + START_TIME_LAST_MONTH_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=31) + if from_date is None: + from_date = START_TIME_LAST_MONTH_UTC + if to_date is None: + to_date = START_TIME_NOW_UTC + if from_ts_ms is None: + from_ts_ms = int(from_date.timestamp() * 1000) + if to_ts_ms is None: + to_ts_ms = int(to_date.timestamp() * 1000) + from_human_str = humanize.naturaltime( + dt.datetime.utcfromtimestamp(from_ts_ms / 1000) + ) + to_human_str = humanize.naturaltime(dt.datetime.utcfromtimestamp(to_ts_ms / 1000)) + logging.info( + "Using a time-delta from {} to {}".format(from_human_str, to_human_str) + ) + baseline_str, by_str_baseline, comparison_str, by_str_comparison = get_by_strings( + baseline_branch, + 
comparison_branch, + baseline_tag, + comparison_tag, + ) + ( + prefix, + testcases_setname, + _, + tsname_project_total_failures, + tsname_project_total_success, + _, + _, + _, + testcases_metric_context_path_setname, + _, + _, + _, + _, + _, + ) = get_overall_dashboard_keynames(tf_github_org, tf_github_repo, tf_triggering_env) + test_names = [] + used_key = testcases_setname + test_filter = "test_name" + if use_metric_context_path: + test_filter = "test_name:metric_context_path" + used_key = testcases_metric_context_path_setname + tags_regex_string = re.compile(testname_regex) + if test != "": + test_names = test.split(",") + logging.info("Using test name {}".format(test_names)) + else: + test_names = get_test_names_from_db( + rts, tags_regex_string, test_names, used_key + ) + ( + detected_regressions, + table, + total_improvements, + total_regressions, + total_stable, + total_unstable, + total_comparison_points, + ) = from_rts_to_regression_table( + baseline_deployment_name, + comparison_deployment_name, + baseline_str, + comparison_str, + by_str_baseline, + by_str_comparison, + from_ts_ms, + to_ts_ms, + last_n_baseline, + last_n_comparison, + metric_mode, + metric_name, + print_improvements_only, + print_regressions_only, + skip_unstable, + regressions_percent_lower_limit, + rts, + simplify_table, + test_filter, + test_names, + tf_triggering_env, + verbose, + running_platform, + ) + logging.info( + "Printing differential analysis between {} and {}".format( + baseline_str, comparison_str + ) + ) + writer = MarkdownTableWriter( + table_name="Comparison between {} and {}.\n\nTime Period from {}. (environment used: {})\n".format( + baseline_str, + comparison_str, + from_human_str, + baseline_deployment_name, + ), + headers=[ + "Test Case", + "Baseline {} (median obs. +- std.dev)".format(baseline_str), + "Comparison {} (median obs. 
+- std.dev)".format(comparison_str), + "% change ({})".format(metric_mode), + "Note", + ], + value_matrix=table, + ) + table_output = "" + + from io import StringIO + import sys + + old_stdout = sys.stdout + sys.stdout = mystdout = StringIO() + + writer.dump(mystdout, False) + + sys.stdout = old_stdout + + table_output = mystdout.getvalue() + + return ( + detected_regressions, + table_output, + total_improvements, + total_regressions, + total_stable, + total_unstable, + total_comparison_points, + ) + + +def get_by_strings( + baseline_branch, + comparison_branch, + baseline_tag, + comparison_tag, +): + baseline_covered = False + comparison_covered = False + by_str_baseline = "" + by_str_comparison = "" + baseline_str = "" + comparison_str = "" + if baseline_branch is not None: + baseline_covered = True + by_str_baseline = "branch" + baseline_str = baseline_branch + if comparison_branch is not None: + comparison_covered = True + by_str_comparison = "branch" + comparison_str = comparison_branch + + if baseline_tag is not None: + if comparison_covered: + logging.error( + "--baseline-branch and --baseline-tag are mutually exclusive. Pick one..." + ) + exit(1) + baseline_covered = True + by_str_baseline = "version" + baseline_str = baseline_tag + + if comparison_tag is not None: + # check if we had already covered comparison + if comparison_covered: + logging.error( + "--comparison-branch and --comparison-tag are mutually exclusive. Pick one..." 
+ ) + exit(1) + comparison_covered = True + by_str_comparison = "version" + comparison_str = comparison_tag + + if baseline_covered is False: + logging.error( + "You need to provider either " + "( --baseline-branch or --baseline-tag ) " + ) + exit(1) + if comparison_covered is False: + logging.error( + "You need to provider either " + + "( --comparison-branch or --comparison-tag ) " + ) + exit(1) + return baseline_str, by_str_baseline, comparison_str, by_str_comparison + + +def from_rts_to_regression_table( + baseline_deployment_name, + comparison_deployment_name, + baseline_str, + comparison_str, + by_str_baseline, + by_str_comparison, + from_ts_ms, + to_ts_ms, + last_n_baseline, + last_n_comparison, + metric_mode, + metric_name, + print_improvements_only, + print_regressions_only, + skip_unstable, + regressions_percent_lower_limit, + rts, + simplify_table, + test_filter, + test_names, + tf_triggering_env, + verbose, + running_platform=None, +): + print_all = print_regressions_only is False and print_improvements_only is False + table = [] + detected_regressions = [] + total_improvements = 0 + total_stable = 0 + total_unstable = 0 + total_regressions = 0 + total_comparison_points = 0 + noise_waterline = 3 + progress = tqdm(unit="benchmark time-series", total=len(test_names)) + at_comparison = 0 + for test_name in test_names: + multi_value_baseline = check_multi_value_filter(baseline_str) + multi_value_comparison = check_multi_value_filter(comparison_str) + + filters_baseline = [ + "{}={}".format(by_str_baseline, baseline_str), + "metric={}".format(metric_name), + "{}={}".format(test_filter, test_name), + "deployment_name={}".format(baseline_deployment_name), + "triggering_env={}".format(tf_triggering_env), + ] + if running_platform is not None: + filters_baseline.append("running_platform={}".format(running_platform)) + filters_comparison = [ + "{}={}".format(by_str_comparison, comparison_str), + "metric={}".format(metric_name), + "{}={}".format(test_filter, 
test_name), + "deployment_name={}".format(comparison_deployment_name), + "triggering_env={}".format(tf_triggering_env), + ] + if running_platform is not None: + filters_comparison.append("running_platform={}".format(running_platform)) + baseline_timeseries = rts.ts().queryindex(filters_baseline) + comparison_timeseries = rts.ts().queryindex(filters_comparison) + + # avoiding target time-series + comparison_timeseries = [x for x in comparison_timeseries if "target" not in x] + baseline_timeseries = [x for x in baseline_timeseries if "target" not in x] + progress.update() + if verbose: + logging.info( + "Baseline timeseries for {}: {}. test={}".format( + baseline_str, len(baseline_timeseries), test_name + ) + ) + logging.info( + "Comparison timeseries for {}: {}. test={}".format( + comparison_str, len(comparison_timeseries), test_name + ) + ) + if len(baseline_timeseries) > 1 and multi_value_baseline is False: + baseline_timeseries = get_only_Totals(baseline_timeseries) + + if len(baseline_timeseries) != 1 and multi_value_baseline is False: + if verbose: + logging.warning( + "Skipping this test given the value of timeseries !=1. 
Baseline timeseries {}".format( + len(baseline_timeseries) + ) + ) + if len(baseline_timeseries) > 1: + logging.warning( + "\t\tTime-series: {}".format(", ".join(baseline_timeseries)) + ) + continue + + if len(comparison_timeseries) > 1 and multi_value_comparison is False: + comparison_timeseries = get_only_Totals(comparison_timeseries) + if len(comparison_timeseries) != 1 and multi_value_comparison is False: + if verbose: + logging.warning( + "Comparison timeseries {}".format(len(comparison_timeseries)) + ) + continue + + baseline_v = "N/A" + comparison_v = "N/A" + baseline_values = [] + baseline_datapoints = [] + comparison_values = [] + comparison_datapoints = [] + percentage_change = 0.0 + baseline_v_str = "N/A" + comparison_v_str = "N/A" + largest_variance = 0 + baseline_pct_change = "N/A" + comparison_pct_change = "N/A" + + note = "" + try: + for ts_name_baseline in baseline_timeseries: + datapoints_inner = rts.ts().revrange( + ts_name_baseline, from_ts_ms, to_ts_ms + ) + baseline_datapoints.extend(datapoints_inner) + ( + baseline_pct_change, + baseline_v, + largest_variance, + ) = get_v_pct_change_and_largest_var( + baseline_datapoints, + baseline_pct_change, + baseline_v, + baseline_values, + largest_variance, + last_n_baseline, + verbose, + ) + for ts_name_comparison in comparison_timeseries: + datapoints_inner = rts.ts().revrange( + ts_name_comparison, from_ts_ms, to_ts_ms + ) + comparison_datapoints.extend(datapoints_inner) + + ( + comparison_pct_change, + comparison_v, + largest_variance, + ) = get_v_pct_change_and_largest_var( + comparison_datapoints, + comparison_pct_change, + comparison_v, + comparison_values, + largest_variance, + last_n_comparison, + verbose, + ) + + waterline = regressions_percent_lower_limit + if regressions_percent_lower_limit < largest_variance: + note = "waterline={:.1f}%.".format(largest_variance) + waterline = largest_variance + + except redis.exceptions.ResponseError: + pass + except ZeroDivisionError as e: + 
logging.error("Detected a ZeroDivisionError. {}".format(e.__str__())) + pass + unstable = False + if baseline_v != "N/A" and comparison_v != "N/A": + if comparison_pct_change > 10.0 or baseline_pct_change > 10.0: + note = "UNSTABLE (very high variance)" + unstable = True + + baseline_v_str = prepare_value_str( + baseline_pct_change, baseline_v, baseline_values, simplify_table + ) + comparison_v_str = prepare_value_str( + comparison_pct_change, comparison_v, comparison_values, simplify_table + ) + + if metric_mode == "higher-better": + percentage_change = ( + float(comparison_v) / float(baseline_v) - 1 + ) * 100.0 + else: + # lower-better + percentage_change = ( + float(baseline_v) / float(comparison_v) - 1 + ) * 100.0 + if baseline_v != "N/A" or comparison_v != "N/A": + detected_regression = False + detected_improvement = False + if percentage_change < 0.0 and not unstable: + if -waterline >= percentage_change: + detected_regression = True + total_regressions = total_regressions + 1 + note = note + " REGRESSION" + detected_regressions.append(test_name) + elif percentage_change < -noise_waterline: + if simplify_table is False: + note = note + " potential REGRESSION" + else: + if simplify_table is False: + note = note + " No Change" + + if percentage_change > 0.0 and not unstable: + if percentage_change > waterline: + detected_improvement = True + total_improvements = total_improvements + 1 + note = note + " IMPROVEMENT" + elif percentage_change > noise_waterline: + if simplify_table is False: + note = note + " potential IMPROVEMENT" + else: + if simplify_table is False: + note = note + " No Change" + + if ( + detected_improvement is False + and detected_regression is False + and not unstable + ): + total_stable = total_stable + 1 + + if unstable: + total_unstable += 1 + + should_add_line = False + if print_regressions_only and detected_regression: + should_add_line = True + if print_improvements_only and detected_improvement: + should_add_line = True + if print_all: 
+ should_add_line = True + if unstable and skip_unstable: + should_add_line = False + + if should_add_line: + total_comparison_points = total_comparison_points + 1 + add_line( + baseline_v_str, + comparison_v_str, + note, + percentage_change, + table, + test_name, + ) + return ( + detected_regressions, + table, + total_improvements, + total_regressions, + total_stable, + total_unstable, + total_comparison_points, + ) + + +def get_only_Totals(baseline_timeseries): + logging.warning("\t\tTime-series: {}".format(", ".join(baseline_timeseries))) + logging.info("Checking if Totals will reduce timeseries.") + new_base = [] + for ts_name in baseline_timeseries: + if "Totals" in ts_name: + new_base.append(ts_name) + baseline_timeseries = new_base + return baseline_timeseries + + +def check_multi_value_filter(baseline_str): + multi_value_baseline = False + if "(" in baseline_str and "," in baseline_str and ")" in baseline_str: + multi_value_baseline = True + return multi_value_baseline + + +def prepare_value_str(baseline_pct_change, baseline_v, baseline_values, simplify_table): + if baseline_v < 1.0: + baseline_v_str = " {:.2f}".format(baseline_v) + elif baseline_v < 10.0: + baseline_v_str = " {:.1f}".format(baseline_v) + else: + baseline_v_str = " {:.0f}".format(baseline_v) + stamp_b = "" + if baseline_pct_change > 10.0: + stamp_b = "UNSTABLE " + if len(baseline_values) > 1: + baseline_v_str += " +- {:.1f}% {}".format( + baseline_pct_change, + stamp_b, + ) + if simplify_table is False and len(baseline_values) > 1: + baseline_v_str += "({} datapoints)".format(len(baseline_values)) + return baseline_v_str + + +def get_test_names_from_db(rts, tags_regex_string, test_names, used_key): + try: + test_names = rts.smembers(used_key) + test_names = list(test_names) + test_names.sort() + final_test_names = [] + for test_name in test_names: + test_name = test_name.decode() + match_obj = re.search(tags_regex_string, test_name) + if match_obj is not None: + 
final_test_names.append(test_name) + test_names = final_test_names + + except redis.exceptions.ResponseError as e: + logging.warning( + "Error while trying to fetch test cases set (key={}) {}. ".format( + used_key, e.__str__() + ) + ) + pass + logging.warning( + "Based on test-cases set (key={}) we have {} comparison points. ".format( + used_key, len(test_names) + ) + ) + return test_names + + +def add_line( + baseline_v_str, + comparison_v_str, + note, + percentage_change, + table, + test_name, +): + percentage_change_str = "{:.1f}% ".format(percentage_change) + table.append( + [ + test_name, + baseline_v_str, + comparison_v_str, + percentage_change_str, + note.strip(), + ] + ) + + +def get_v_pct_change_and_largest_var( + comparison_datapoints, + comparison_pct_change, + comparison_v, + comparison_values, + largest_variance, + last_n=-1, + verbose=False, +): + comparison_nsamples = len(comparison_datapoints) + if comparison_nsamples > 0: + _, comparison_v = comparison_datapoints[0] + for tuple in comparison_datapoints: + if last_n < 0 or (last_n > 0 and len(comparison_values) < last_n): + comparison_values.append(tuple[1]) + comparison_df = pd.DataFrame(comparison_values) + comparison_median = float(comparison_df.median()) + comparison_v = comparison_median + comparison_std = float(comparison_df.std()) + if verbose: + logging.info( + "comparison_datapoints: {} value: {}; std-dev: {}; median: {}".format( + comparison_datapoints, + comparison_v, + comparison_std, + comparison_median, + ) + ) + comparison_pct_change = (comparison_std / comparison_median) * 100.0 + if comparison_pct_change > largest_variance: + largest_variance = comparison_pct_change + return comparison_pct_change, comparison_v, largest_variance + + +def main(): + _, _, project_version = populate_with_poetry_data() + project_name = "redis-benchmarks-spec-cli" + parser = argparse.ArgumentParser( + description=get_version_string(project_name, project_version), + 
formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser = create_compare_arguments(parser) + args = parser.parse_args() + compare_command_logic(args, project_name, project_version) diff --git a/redis_benchmarks_specification/__runner__/runner.py b/redis_benchmarks_specification/__runner__/runner.py index 57acc55..841a032 100644 --- a/redis_benchmarks_specification/__runner__/runner.py +++ b/redis_benchmarks_specification/__runner__/runner.py @@ -358,6 +358,7 @@ def process_self_contained_coordinator_stream( preserve_temporary_client_dirs=False, resp_version=None, override_memtier_test_time=0, + used_memory_check_fail=False, ): def delete_temporary_files( temporary_dir_client, full_result_path, benchmark_tool_global @@ -587,20 +588,20 @@ def delete_temporary_files( priority_upper_limit, ) ) - - if "dataset" in benchmark_config["dbconfig"]: - if args.run_tests_with_dataset is False: - logging.warning( - "Skipping test {} giving it implies dataset preload".format( - test_name + if "dbconfig" in benchmark_config: + if "dataset" in benchmark_config["dbconfig"]: + if args.run_tests_with_dataset is False: + logging.warning( + "Skipping test {} giving it implies dataset preload".format( + test_name + ) ) - ) - delete_temporary_files( - temporary_dir_client=temporary_dir_client, - full_result_path=None, - benchmark_tool_global=benchmark_tool_global, - ) - continue + delete_temporary_files( + temporary_dir_client=temporary_dir_client, + full_result_path=None, + benchmark_tool_global=benchmark_tool_global, + ) + continue if dry_run is True: dry_run_count = dry_run_count + 1 @@ -610,38 +611,38 @@ def delete_temporary_files( benchmark_tool_global=benchmark_tool_global, ) continue - - if "preload_tool" in benchmark_config["dbconfig"]: - res = data_prepopulation_step( - benchmark_config, - benchmark_tool_workdir, - client_cpuset_cpus, - docker_client, - git_hash, - port, - temporary_dir_client, - test_name, - host, - tls_enabled, - tls_skip_verify, - test_tls_cert, - 
test_tls_key, - test_tls_cacert, - resp_version, - args.benchmark_local_install, - password, - oss_cluster_api_enabled, - ) - if res is False: - logging.warning( - "Skipping this test given preload result was false" - ) - delete_temporary_files( - temporary_dir_client=temporary_dir_client, - full_result_path=None, - benchmark_tool_global=benchmark_tool_global, + if "dbconfig" in benchmark_config: + if "preload_tool" in benchmark_config["dbconfig"]: + res = data_prepopulation_step( + benchmark_config, + benchmark_tool_workdir, + client_cpuset_cpus, + docker_client, + git_hash, + port, + temporary_dir_client, + test_name, + host, + tls_enabled, + tls_skip_verify, + test_tls_cert, + test_tls_key, + test_tls_cacert, + resp_version, + args.benchmark_local_install, + password, + oss_cluster_api_enabled, ) - continue + if res is False: + logging.warning( + "Skipping this test given preload result was false" + ) + delete_temporary_files( + temporary_dir_client=temporary_dir_client, + full_result_path=None, + benchmark_tool_global=benchmark_tool_global, + ) + continue execute_init_commands( benchmark_config, r, dbconfig_keyname="dbconfig" ) @@ -651,6 +652,7 @@ def delete_temporary_files( benchmark_required_memory, redis_conns, "start of benchmark", + used_memory_check_fail, ) logging.info("Checking if there is a keyspace check being enforced") @@ -842,6 +844,7 @@ def delete_temporary_files( benchmark_required_memory, redis_conns, "end of benchmark", + used_memory_check_fail, ) if args.flushall_on_every_test_end: @@ -1003,22 +1006,33 @@ def get_maxmemory(r): def get_benchmark_required_memory(benchmark_config): benchmark_required_memory = 0 - if "resources" in benchmark_config["dbconfig"]: - resources = benchmark_config["dbconfig"]["resources"] - if "requests" in resources: - resources_requests = benchmark_config["dbconfig"]["resources"]["requests"] - if "memory" in resources_requests: - benchmark_required_memory = resources_requests["memory"] - benchmark_required_memory = 
int(parse_size(benchmark_required_memory)) - logging.info( - "Benchmark required memory: {} Bytes".format( - benchmark_required_memory + if "dbconfig" in benchmark_config: + if "resources" in benchmark_config["dbconfig"]: + resources = benchmark_config["dbconfig"]["resources"] + if "requests" in resources: + resources_requests = benchmark_config["dbconfig"]["resources"][ + "requests" + ] + if "memory" in resources_requests: + benchmark_required_memory = resources_requests["memory"] + benchmark_required_memory = int( + parse_size(benchmark_required_memory) + ) + logging.info( + "Benchmark required memory: {} Bytes".format( + benchmark_required_memory + ) ) - ) return benchmark_required_memory -def used_memory_check(test_name, benchmark_required_memory, redis_conns, stage): +def used_memory_check( + test_name, + benchmark_required_memory, + redis_conns, + stage, + used_memory_check_fail=False, +): used_memory = 0 for conn in redis_conns: used_memory = used_memory + conn.info("memory")["used_memory"] @@ -1030,7 +1044,8 @@ def used_memory_check(test_name, benchmark_required_memory, redis_conns, stage): test_name, benchmark_required_memory, used_memory_gb ) ) - exit(1) + if used_memory_check_fail: + exit(1) def cp_to_workdir(benchmark_tool_workdir, srcfile): diff --git a/redis_benchmarks_specification/test-suites/create-re-string.py b/redis_benchmarks_specification/test-suites/create-re-string.py new file mode 100644 index 0000000..470a602 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/create-re-string.py @@ -0,0 +1,286 @@ +tests = [ + { + "name": "memtier_benchmark-100Kkeys-string-setget50c-20KiB", + "precommand": "--data-size 20000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=100000 -n allkeys", + "check": {"keyspacelen": 100000}, + "command": "--data-size 20000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=100000 --test-time 180", + "kpis": {}, + "tested-commands": 
["setget-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-string-setget50c-20KiB-pipeline-10", + "precommand": "--data-size 20000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=100000 -n allkeys", + "check": {"keyspacelen": 100000}, + "command": "--pipeline 10 --data-size 20000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --key-minimum=1 --key-maximum=100000 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-string-setget200c-20KiB", + "precommand": "--data-size 20000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=100000 -n allkeys", + "check": {"keyspacelen": 100000}, + "command": "--data-size 20000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=100000 --test-time 180", + "kpis": {}, + "tested-commands": ["setget-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-string-setget200c-20KiB-pipeline-10", + "precommand": "--data-size 20000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=100000 -n allkeys", + "check": {"keyspacelen": 100000}, + "command": "--pipeline 10 --data-size 20000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --key-minimum=1 --key-maximum=100000 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 --test-time 180 -c 25 -t 2 --hide-histogram", + "kpis": {}, + "tested-commands": ["set-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values-pipeline-10", + 
"precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 --test-time 180 -c 25 -t 2 --hide-histogram", + "kpis": {}, + "tested-commands": ["set-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values-pipeline-10", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set-20k"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget50c-100B", + "precommand": "--data-size 100 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--data-size 100 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget50c-1KiB", + "precommand": "--data-size 1000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--data-size 1000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget50c-100B-pipeline-10", + "precommand": 
"--data-size 100 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--pipeline 10 --data-size 100 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget50c-1KiB-pipeline-10", + "precommand": "--data-size 1000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--pipeline 10 --data-size 1000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget200c-100B", + "precommand": "--data-size 100 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--data-size 100 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget200c-1KiB", + "precommand": "--data-size 1000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--data-size 1000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget200c-100B-pipeline-10", + "precommand": "--data-size 100 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--pipeline 10 --data-size 100 
--ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10", + "precommand": "--data-size 1000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum=1000000 -n allkeys", + "check": {"keyspacelen": 1000000}, + "command": "--pipeline 10 --data-size 1000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram --test-time 180", + "kpis": {}, + "tested-commands": ["setget"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string50c-with-100B-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string50c-with-100B-values-pipeline-10", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values-pipeline-10", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram", + 
"kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string200c-with-100B-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string200c-with-100B-values-pipeline-10", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values-pipeline-10", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": "--pipeline 10 --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram", + "kpis": {}, + "tested-commands": ["set"], + "tested-groups": ["string"], + }, + { + "name": "memtier_benchmark-1Mkeys-string-mget-1KiB", + "precommand": "--data-size 1000 --key-minimum=1 --key-maximum 1000000 -n allkeys --ratio=1:0 --key-pattern P:P --hide-histogram -t 2 -c 100", + "check": {"keyspacelen": 1000000}, + "command": ' --command="MGET __key__ memtier-1 memtier-2 memtier-3 memtier-4 memtier-5 memtier-6 memtier-7 memtier-8 memtier-9" --command-key-pattern=R -c 50 -t 2 --hide-histogram --test-time 180', + "kpis": {}, + 
"tested-commands": ["mget"], + "tested-groups": ["string"], + }, +] + +print(len(tests)) +re_filenames = [x["name"] for x in tests] +re_test_specs = {} +for x in tests: + re_test_specs[x["name"]] = x + +import yaml +import json +import pathlib + + +defaults_filename = "default.yml" +prefix = "memtier_benchmark-" +test_glob = "memtier_*.yml" +files = pathlib.Path().glob(test_glob) +files = [str(x) for x in files] + +base_yaml = yaml.safe_load(open("memtier_benchmark-1Mkeys-string-get-1KiB.yml")) +del base_yaml["description"] +# del base_yaml["clientconfig"]["resources"] +# del base_yaml["build-variants"] +# del base_yaml["priority"] +# del base_yaml["redis-topologies"] +# del base_yaml["tested-commands"] +# del base_yaml["version"] +# del base_yaml["tested-groups"] +# + + +# +# for file in files: +# if defaults_filename in file: +# files.remove(file) + +for re_file in re_filenames: + re_spec = re_test_specs[re_file] + precommand = "" + if "precommand" in re_spec: + precommand = re_spec["precommand"] + + command = "" + if "command" in re_spec: + command = re_spec["command"] + if "dbconfig" in base_yaml: + del base_yaml["dbconfig"] + if precommand != "": + base_yaml["dbconfig"] = {} + base_yaml["dbconfig"]["preload_tool"] = {} + base_yaml["dbconfig"]["preload_tool"][ + "run_image" + ] = "redislabs/memtier_benchmark:edge" + base_yaml["dbconfig"]["preload_tool"]["tool"] = "memtier_benchmark" + base_yaml["dbconfig"]["preload_tool"]["arguments"] = f"{precommand}" + + base_yaml["clientconfig"]["arguments"] = command + base_yaml["name"] = re_file + with open(f"re-string/{re_file}.yml", "w") as outfile: + yaml.dump(base_yaml, outfile) diff --git a/redis_benchmarks_specification/test-suites/generate.py b/redis_benchmarks_specification/test-suites/generate.py new file mode 100644 index 0000000..50e0213 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/generate.py @@ -0,0 +1,108 @@ +import yaml +import json +import pathlib + +expected_format = { + "name": 
"HSet_d_400_c_10_t_10_pipeline_10_key-max=1000000", + "precommand": "", + "check": {"keyspacelen": 0}, + "command": '--command="HSET myhash2 __key__ __data__" --command-key-pattern=G --key-minimum=1 --key-maximum 1000000 d 400 -c 10 -t 10 --hide-histogram --pipeline=10 --test-time=100', + "kpis": { + "tls": {"ops": 1003694, "latency": 1, "kbs": 89188}, + "default": {"ops": 932625, "latency": 1.1, "kbs": 82873}, + }, + "tested-commands": ["hset"], + "tested-groups": ["hash"], + "comparison": {"max_variance_percent": 2.0}, +} +defaults_filename = "default.yml" +prefix = "memtier_benchmark-" +test_glob = "memtier_*.yml" +files = pathlib.Path().glob(test_glob) +files = [str(x) for x in files] + +for file in files: + if defaults_filename in file: + files.remove(file) + + +print(len(files)) + +rdb_counter = 0 +init_commands_counter = 0 +can_convert_counter = 0 + +group_tests = {} +counter_groups = {} +final_enterprise_json = [] +for yml_filename in files: + with open(yml_filename, "r") as yml_file: + benchmark_config = yaml.safe_load(yml_file) + tested_commands = benchmark_config["tested-commands"] + name = benchmark_config["name"] + tested_groups = benchmark_config["tested-groups"] + contains_db_config = False + contains_init_commands = False + contains_rdb = False + dbconfig = {} + precommand = "" + if "dbconfig" in benchmark_config: + dbconfig = benchmark_config["dbconfig"] + contains_db_config = True + if "init_commands" in benchmark_config["dbconfig"]: + contains_init_commands = True + if "dataset" in benchmark_config["dbconfig"]: + contains_rdb = True + + if contains_rdb: + rdb_counter = rdb_counter + 1 + elif contains_init_commands: + init_commands_counter = init_commands_counter + 1 + else: + can_convert_counter = can_convert_counter + 1 + + if ( + contains_db_config + and contains_rdb is False + and contains_init_commands is False + ): + keyspace_check = False + keyspace_value = 0 + + if "check" in dbconfig: + if "keyspacelen" in dbconfig["check"]: + 
keyspace_check = True + keyspace_value = dbconfig["check"]["keyspacelen"] + if "preload_tool" in dbconfig: + precommand = dbconfig["preload_tool"]["arguments"] + command = benchmark_config["clientconfig"]["arguments"] + check_dict = {"keyspacelen": keyspace_value} + + test_definition = { + "name": name, + "precommand": f"{precommand}", + "check": check_dict, + "command": f"{command}", + "kpis": {}, + "tested-commands": tested_commands, + "tested-groups": tested_groups, + } + for tested_group in tested_groups: + if tested_group not in group_tests: + group_tests[tested_group] = [] + group_tests[tested_group].append(test_definition) + if tested_group not in counter_groups: + counter_groups[tested_group] = 0 + counter_groups[tested_group] = counter_groups[tested_group] + 1 + final_enterprise_json.append(test_definition) + +print(f"RDB tests {rdb_counter}") +print(f"INIT command tests {init_commands_counter}") +print(f"Other tests {can_convert_counter}") +print(f"Final Enterprise tests {len(final_enterprise_json)}") + +print(counter_groups) + +for tested_group, tests in group_tests.items(): + with open(f"jsons/{tested_group}.json", "w") as json_fd: + json.dump(tests, json_fd) diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values-pipeline-10.yml new file mode 100644 index 0000000..bb5c2a6 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum=100000 --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + 
tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values.yml new file mode 100644 index 0000000..688dfba --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 + --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-load-string200c-with-20KiB-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values-pipeline-10.yml new file mode 100644 index 0000000..547ae39 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum=100000 --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: 
memtier_benchmark +name: memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values.yml new file mode 100644 index 0000000..6547fe6 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum=100000 + --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-load-string50c-with-20KiB-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB-pipeline-10.yml new file mode 100644 index 0000000..31403c1 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB-pipeline-10.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 20000 --ratio 1:10 --key-pattern R:R -c 50 + -t 4 --key-minimum=1 --key-maximum=100000 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: 
+ arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=100000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-string-setget200c-20KiB-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB.yml new file mode 100644 index 0000000..e39d97f --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget200c-20KiB.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 20000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=100000 --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=100000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-string-setget200c-20KiB +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB-pipeline-10.yml new file mode 100644 index 0000000..20cc73a --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB-pipeline-10.yml @@ -0,0 +1,26 
@@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 20000 --ratio 1:10 --key-pattern R:R -c 25 + -t 2 --key-minimum=1 --key-maximum=100000 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=100000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-string-setget50c-20KiB-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB.yml new file mode 100644 index 0000000..84140ca --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-100Kkeys-string-setget50c-20KiB.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 20000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=100000 --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 20000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=100000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-100Kkeys-string-setget50c-20KiB +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git 
a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values-pipeline-10.yml new file mode 100644 index 0000000..cea314c --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string200c-with-100B-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values.yml new file mode 100644 index 0000000..25b503a --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-100B-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum + 1000000 --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string200c-with-100B-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git 
a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values-pipeline-10.yml new file mode 100644 index 0000000..23dd3da --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values.yml new file mode 100644 index 0000000..009f97d --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum + 1000000 --test-time 180 -c 50 -t 4 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string200c-with-1KiB-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git 
a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values-pipeline-10.yml new file mode 100644 index 0000000..3f2d633 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string50c-with-100B-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values.yml new file mode 100644 index 0000000..a8524f1 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-100B-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 100 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum + 1000000 --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string50c-with-100B-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git 
a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values-pipeline-10.yml new file mode 100644 index 0000000..34f5820 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values-pipeline-10.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 + --key-maximum 1000000 --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values.yml new file mode 100644 index 0000000..a22b110 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values.yml @@ -0,0 +1,20 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P --key-minimum=1 --key-maximum + 1000000 --test-time 180 -c 25 -t 2 --hide-histogram + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-load-string50c-with-1KiB-values +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- set +tested-groups: +- string +version: 0.4 diff --git 
a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-mget-1KiB.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-mget-1KiB.yml new file mode 100644 index 0000000..ffe59d8 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-mget-1KiB.yml @@ -0,0 +1,27 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: ' --command="MGET __key__ memtier-1 memtier-2 memtier-3 memtier-4 memtier-5 + memtier-6 memtier-7 memtier-8 memtier-9" --command-key-pattern=R -c 50 -t 2 --hide-histogram + --test-time 180' + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 1000 --key-minimum=1 --key-maximum 1000000 -n allkeys --ratio=1:0 --key-pattern + P:P --hide-histogram -t 2 -c 100 + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-mget-1KiB +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- mget +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B-pipeline-10.yml new file mode 100644 index 0000000..aa728b7 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B-pipeline-10.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 100 --ratio 1:10 --key-pattern R:R -c 50 -t + 4 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 100 --ratio 1:0 
--key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget200c-100B-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B.yml new file mode 100644 index 0000000..d864f6b --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-100B.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 100 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram + --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 100 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget200c-100B +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml new file mode 100644 index 0000000..ccd97f5 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 
--data-size 1000 --ratio 1:10 --key-pattern R:R -c 50 -t + 4 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB.yml new file mode 100644 index 0000000..d3a7e25 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget200c-1KiB.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 1000 --ratio 1:10 --key-pattern R:R -c 50 -t 4 --hide-histogram + --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P -c 50 -t 4 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget200c-1KiB +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B-pipeline-10.yml new file 
mode 100644 index 0000000..bd84eb9 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B-pipeline-10.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 100 --ratio 1:10 --key-pattern R:R -c 25 -t + 2 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 100 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget50c-100B-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B.yml new file mode 100644 index 0000000..4e493f2 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-100B.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 100 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram + --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 100 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget50c-100B +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: 
+- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB-pipeline-10.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB-pipeline-10.yml new file mode 100644 index 0000000..80674e7 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB-pipeline-10.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --pipeline 10 --data-size 1000 --ratio 1:10 --key-pattern R:R -c 25 -t + 2 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 --hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget50c-1KiB-pipeline-10 +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB.yml b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB.yml new file mode 100644 index 0000000..15d3844 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/string/memtier_benchmark-1Mkeys-string-setget50c-1KiB.yml @@ -0,0 +1,26 @@ +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + arguments: --data-size 1000 --ratio 1:10 --key-pattern R:R -c 25 -t 2 --hide-histogram + --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +dbconfig: + preload_tool: + arguments: --data-size 1000 --ratio 1:0 --key-pattern P:P -c 25 -t 2 
--hide-histogram + --key-minimum=1 --key-maximum=1000000 -n allkeys + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark +name: memtier_benchmark-1Mkeys-string-setget50c-1KiB +priority: 1 +redis-topologies: +- oss-standalone +tested-commands: +- get +tested-groups: +- string +version: 0.4 diff --git a/redis_benchmarks_specification/test-suites/template.txt b/redis_benchmarks_specification/test-suites/template.txt new file mode 100644 index 0000000..7237e87 --- /dev/null +++ b/redis_benchmarks_specification/test-suites/template.txt @@ -0,0 +1,16 @@ +version: 0.4 +name: change_me +description: describe me. +redis-topologies: +- oss-standalone +build-variants: +- gcc:8.5.0-amd64-debian-buster-default +clientconfig: + run_image: redislabs/memtier_benchmark:edge + tool: memtier_benchmark + arguments: --pipeline 10 -c 2 -t 2 --command="HSET __key__ field __data__" -R --data-size=30 --hide-histogram --test-time 180 + resources: + requests: + cpus: '4' + memory: 2g +