From 1ee597508d11281eda5196beba10efaaa54a6685 Mon Sep 17 00:00:00 2001
From: huangjianhui <852142024@qq.com>
Date: Tue, 4 Jan 2022 21:57:36 +0800
Subject: [PATCH 01/12] Update util.py

Delete pynvml and check_gpu_memory
---
 python/paddle_serving_server/env_check/util.py | 11 -----------
 1 file changed, 11 deletions(-)

diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py
index ef758c525..d88fdf240 100644
--- a/python/paddle_serving_server/env_check/util.py
+++ b/python/paddle_serving_server/env_check/util.py
@@ -1,5 +1,4 @@
 import os
-import pynvml
 import argparse
 import base64
 import subprocess
@@ -87,16 +86,6 @@ def kill_process(port, sleep_time=0):
     # free the occupied port
     os.system(f"sleep {sleep_time}")

-
-def check_gpu_memory(gpu_id):
-    pynvml.nvmlInit()
-    handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
-    mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
-    mem_used = mem_info.used / 1024 ** 2
-    print(f"GPU-{gpu_id} memory used:", mem_used)
-    return mem_used > 100
-
-
 def count_process_num_on_port(port):
     command = "netstat -nlp | grep :" + str(port) + " | wc -l"
     count = eval(os.popen(command).read())

From 07199bd9f1da6ed9fd5371e34f68018f7b6de79c Mon Sep 17 00:00:00 2001
From: huangjianhui <852142024@qq.com>
Date: Tue, 4 Jan 2022 22:09:20 +0800
Subject: [PATCH 02/12] Update test_fit_a_line.py

Kill the C++ service when env_check starts
---
 python/paddle_serving_server/env_check/test_fit_a_line.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/paddle_serving_server/env_check/test_fit_a_line.py b/python/paddle_serving_server/env_check/test_fit_a_line.py
index ad8c21de3..23ebe3469 100644
--- a/python/paddle_serving_server/env_check/test_fit_a_line.py
+++ b/python/paddle_serving_server/env_check/test_fit_a_line.py
@@ -20,12 +20,13 @@ def setup_class(self):
         serving_util.check_model_data_exist()
         self.get_truth_val_by_inference(self)
         self.serving_util = serving_util
+        self.serving_util.release('service')

     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
                    "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
         kill_process(9494)
-        self.serving_util.release()
+        self.serving_util.release('service')

     def get_truth_val_by_inference(self):
         try:

From d97761f3c9b2fc7f7d80c330879fb6960f2d2b73 Mon Sep 17 00:00:00 2001
From: huangjianhui <852142024@qq.com>
Date: Tue, 4 Jan 2022 22:11:37 +0800
Subject: [PATCH 03/12] Update test_uci_pipeline.py

Kill the web_service when env_check starts
---
 python/paddle_serving_server/env_check/test_uci_pipeline.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/python/paddle_serving_server/env_check/test_uci_pipeline.py b/python/paddle_serving_server/env_check/test_uci_pipeline.py
index 1d32f4e7c..a27bc0697 100644
--- a/python/paddle_serving_server/env_check/test_uci_pipeline.py
+++ b/python/paddle_serving_server/env_check/test_uci_pipeline.py
@@ -21,12 +21,13 @@ def setup_class(self):
         serving_util.check_model_data_exist()
         self.get_truth_val_by_inference(self)
         self.serving_util = serving_util
+        self.serving_util.release('web_service')

     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
                    "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
         kill_process(9998)
-        self.serving_util.release()
+        self.serving_util.release('web_service')

     def get_truth_val_by_inference(self):
         try:
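Note: patch 01 removes env_check's only dependency on pynvml together with the check_gpu_memory helper. If a GPU memory probe is ever needed again without reintroducing that dependency, an equivalent can be built on the nvidia-smi CLI alone. The sketch below is illustrative, not part of the series; it mirrors the deleted helper's name and its 100 MiB threshold, and assumes nvidia-smi is on PATH:

```python
import subprocess

def check_gpu_memory(gpu_id):
    # Query used memory (MiB) for one GPU via nvidia-smi instead of pynvml.
    out = subprocess.check_output(
        ["nvidia-smi", "-i", str(gpu_id),
         "--query-gpu=memory.used", "--format=csv,noheader,nounits"],
        text=True)
    mem_used = int(out.strip())
    print(f"GPU-{gpu_id} memory used:", mem_used)
    return mem_used > 100
```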
From 011ea89a2845cd16f2370af9d0461d5947bb15ec Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Wed, 5 Jan 2022 07:18:21 +0000
Subject: [PATCH 04/12] Add an interactive env_check command

---
 python/paddle_serving_server/env_check/run.py | 24 ++++++++++----
 python/paddle_serving_server/serve.py         | 33 +++++++++++++++++--
 2 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py
index 6b2955547..639377fae 100644
--- a/python/paddle_serving_server/env_check/run.py
+++ b/python/paddle_serving_server/env_check/run.py
@@ -31,12 +31,13 @@
 cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
 pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]

-def run_test_cases(cases_list, case_type):
+def run_test_cases(cases_list, case_type, is_open_std):
     old_stdout, old_stderr = sys.stdout, sys.stderr
     real_path = os.path.dirname(os.path.realpath(__file__))
     for case in cases_list:
-        sys.stdout = open('/dev/null', 'w')
-        sys.stderr = open('/dev/null', 'w')
+        if is_open_std is False:
+            sys.stdout = open('/dev/null', 'w')
+            sys.stderr = open('/dev/null', 'w')
         args_str = "--disable-warnings " + str(real_path) + "/" + case
         args = args_str.split(" ")
         res = pytest.main(args)
@@ -54,10 +55,19 @@ def run_test_cases(cases_list, case_type):
 def unset_proxy(key):
     os.unsetenv(key)

-def check_env():
+def check_env(mode):
     if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
         unset_proxy("https_proxy")
         unset_proxy("http_proxy")
-    run_test_cases(inference_test_cases, "PaddlePaddle")
-    run_test_cases(cpp_test_cases, "C++")
-    run_test_cases(pipeline_test_cases, "Pipeline")
+    is_open_std = False
+    if mode == "debug":
+        is_open_std = True
+    if mode == "all" or mode == "inference" or mode == "debug":
+        run_test_cases(inference_test_cases, "PaddlePaddle", is_open_std)
+    if mode == "all" or mode == "cpp" or mode == "debug":
+        run_test_cases(cpp_test_cases, "C++", is_open_std)
+    if mode == "all" or mode == "pipeline" or mode == "debug":
+        run_test_cases(pipeline_test_cases, "Pipeline", is_open_std)
+
+if __name__ == '__main__':
+    check_env("debug")
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index 6e8cb2832..ce8e3dda3 100755
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -35,6 +35,7 @@
 import signal
 from paddle_serving_server.util import *
 from paddle_serving_server.env_check.run import check_env
+import cmd


 # web_service.py is still used by Pipeline.
@@ -471,6 +472,35 @@ def stop_serving(command: str, port: int=None):
                 os.remove(filepath)
     return True

+class Check_Env_Shell(cmd.Cmd):
+    intro = 'Welcome to the check env shell. Type help or ? to list commands.\n'
+    #prompt = '(check) '
+    # ----- basic commands -----
+    def do_check_all(self, arg):
+        'Check Environment of Paddle Inference, Pipeline Serving, C++ Serving'
+        check_env("all")
+
+    def do_check_pipeline(self, arg):
+        'Check Environment of Pipeline Serving'
+        check_env("pipeline")
+
+    def do_check_cpp(self, arg):
+        'Check Environment of C++ Serving'
+        check_env("cpp")
+
+    def do_check_inference(self, arg):
+        'Check Environment of Paddle Inference'
+        check_env("inference")
+
+    def do_debug(self, arg):
+        'Open pytest log to debug'
+        check_env("debug")
+
+    def do_exit(self, arg):
+        'Exit Check Env Shell'
+        print('Check Environment Shell Exit')
+        os._exit(0)
+        return True

 if __name__ == "__main__":
     # args.device is not used at all.
@@ -488,8 +518,7 @@ def stop_serving(command: str, port: int=None):
         else:
             os._exit(-1)
     elif args.server == "check":
-        check_env()
-        os._exit(0)
+        Check_Env_Shell().cmdloop()
     for single_model_config in args.model:
         if os.path.isdir(single_model_config):
             pass
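Note: patch 04 turns the check subcommand from a one-shot check_env() call into an interactive shell built on the standard-library cmd module: cmd.Cmd maps each do_<name> method to a shell command, uses its docstring as the built-in help text, and cmdloop() keeps reading commands until a handler returns a truthy value. A minimal, self-contained sketch of that pattern (not the patch's code, which dispatches into check_env):

```python
import cmd

class CheckShell(cmd.Cmd):
    intro = "Type help or ? to list commands.\n"
    prompt = "(check) "

    def do_check(self, arg):
        'Run a named check; this docstring becomes the help text.'
        print(f"checking {arg or 'all'} ...")

    def do_exit(self, arg):
        'Exit the shell.'
        return True  # a truthy return value stops cmdloop()

if __name__ == "__main__":
    CheckShell().cmdloop()
```

After this patch, running the server module's check subcommand (e.g. `python3 -m paddle_serving_server.serve check`) drops into this shell instead of immediately running every check.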
From 7cdfbf5bccccd709be96595fe635d7a75b600efb Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Wed, 5 Jan 2022 10:49:00 +0000
Subject: [PATCH 05/12] Modify the env_check help command

---
 .../paddle_serving_server/env_check/util.py |  5 +++-
 python/paddle_serving_server/serve.py       | 24 ++++++++++-------
 2 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py
index d88fdf240..a733aecfb 100644
--- a/python/paddle_serving_server/env_check/util.py
+++ b/python/paddle_serving_server/env_check/util.py
@@ -3,6 +3,7 @@
 import base64
 import subprocess
 import numpy as np
+import sys

 class ServingTest(object):
     def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir: str):
@@ -15,7 +16,9 @@ def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir
         code_path = os.path.dirname(os.path.realpath(__file__))
         self.data_path = f"{code_path}/{data_path}/"
         self.example_path = f"{code_path}/{example_path}/"
-        self.py_version = os.environ.get("PYTHON_EXECUTABLE")
+        self.py_version = sys.executable
+        if 'PYTHON_EXECUTABLE' in os.environ:
+            self.py_version = os.environ.get("PYTHON_EXECUTABLE")
         self.model_dir = model_dir
         self.client_config = f"{client_dir}/serving_client_conf.prototxt"

diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index ce8e3dda3..19f93dfe1 100755
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -473,31 +473,39 @@ def stop_serving(command: str, port: int=None):
     return True

 class Check_Env_Shell(cmd.Cmd):
-    intro = 'Welcome to the check env shell. Type help or ? to list commands.\n'
-    #prompt = '(check) '
+    intro = "Welcome to the check env shell. Type help to list commands.\n"
     # ----- basic commands -----
+    def do_help(self, arg):
+        print("\nCommand list\n"\
+              "check_all\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\
+              "check_pipeline\tCheck Environment of Pipeline Serving\n"\
+              "check_cpp\tCheck Environment of C++ Serving\n"\
+              "check_inference\tCheck Environment of Paddle Inference\n"\
+              "debug\tOpen pytest log to debug\n"\
+              "exit\tExit Check Env Shell\n")
+
     def do_check_all(self, arg):
-        'Check Environment of Paddle Inference, Pipeline Serving, C++ Serving'
+        "Check Environment of Paddle Inference, Pipeline Serving, C++ Serving"
         check_env("all")

     def do_check_pipeline(self, arg):
-        'Check Environment of Pipeline Serving'
+        "Check Environment of Pipeline Serving"
         check_env("pipeline")

     def do_check_cpp(self, arg):
-        'Check Environment of C++ Serving'
+        "Check Environment of C++ Serving"
         check_env("cpp")

     def do_check_inference(self, arg):
-        'Check Environment of Paddle Inference'
+        "Check Environment of Paddle Inference"
         check_env("inference")

     def do_debug(self, arg):
-        'Open pytest log to debug'
+        "Open pytest log to debug"
         check_env("debug")

     def do_exit(self, arg):
-        'Exit Check Env Shell'
+        "Exit Check Env Shell"
         print('Check Environment Shell Exit')
         os._exit(0)
         return True

From d822637b42e05c0b69299833ad63c91dfbc29916 Mon Sep 17 00:00:00 2001
From: huangjianhui <852142024@qq.com>
Date: Tue, 11 Jan 2022 16:56:03 +0800
Subject: [PATCH 06/12] Update test_fit_a_line.py

---
 python/paddle_serving_server/env_check/test_fit_a_line.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/paddle_serving_server/env_check/test_fit_a_line.py b/python/paddle_serving_server/env_check/test_fit_a_line.py
index 23ebe3469..ea5171dac 100644
--- a/python/paddle_serving_server/env_check/test_fit_a_line.py
+++ b/python/paddle_serving_server/env_check/test_fit_a_line.py
@@ -104,7 +104,7 @@ def test_cpu(self):
         )

         # 2.resource check
-        assert count_process_num_on_port(9494) == 1
+        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"

         # 4.predict by brpc
         # batch_size 1
@@ -124,7 +124,7 @@ def test_gpu(self):
         )

         # 2.resource check
-        assert count_process_num_on_port(9494) == 1
+        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"

         # 4.predict by brpc
         # batch_size 1
check check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log") @@ -131,8 +131,8 @@ def test_gpu(self): ) # 2.resource check - assert count_process_num_on_port(9998) == 1 # gRPC Server - assert count_process_num_on_port(18082) == 1 # gRPC gateway + assert count_process_num_on_port(9998) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC Server + assert count_process_num_on_port(18082) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC gateway # 3.predict by rpc result = self.predict_pipeline_rpc(batch_size=1) From ccebc3981b547f73f77f26b514480918be761926 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Thu, 13 Jan 2022 03:24:50 +0000 Subject: [PATCH 08/12] modify log print and help description --- python/paddle_serving_server/env_check/run.py | 11 +++++++---- .../env_check/test_fit_a_line.py | 13 +++++-------- .../env_check/test_uci_pipeline.py | 15 ++++++--------- python/paddle_serving_server/env_check/util.py | 10 +++++----- python/paddle_serving_server/serve.py | 14 +++++++------- 5 files changed, 30 insertions(+), 33 deletions(-) diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py index 639377fae..9b1ed150a 100644 --- a/python/paddle_serving_server/env_check/run.py +++ b/python/paddle_serving_server/env_check/run.py @@ -50,15 +50,18 @@ def run_test_cases(cases_list, case_type, is_open_std): print("{} {} environment running failure. Please refer to https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html to configure environment".format(case_type, case_name)) os._exit(0) else: - print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/HEAD/doc/Compile_CN.md to configure environment".format(case_type, case_name)) + print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/develop/doc/Install_CN.md".format(case_type, case_name)) -def unset_proxy(key): +def unset_env(key): os.unsetenv(key) def check_env(mode): + if 'https_proxy' in os.environ or 'http_proxy' in os.environ: - unset_proxy("https_proxy") - unset_proxy("http_proxy") + unset_env("https_proxy") + unset_env("http_proxy") + if 'GREP_OPTIONS' in os.environ: + unset_env("GREP_OPTIONS") is_open_std = False if mode is "debug": is_open_std = True diff --git a/python/paddle_serving_server/env_check/test_fit_a_line.py b/python/paddle_serving_server/env_check/test_fit_a_line.py index ea5171dac..c224d4b77 100644 --- a/python/paddle_serving_server/env_check/test_fit_a_line.py +++ b/python/paddle_serving_server/env_check/test_fit_a_line.py @@ -21,10 +21,11 @@ def setup_class(self): self.get_truth_val_by_inference(self) self.serving_util = serving_util self.serving_util.release('service') + kill_process(9494) def teardown_method(self): print_log(["stderr.log", "stdout.log", - "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict") + "log/serving.ERROR", "PipelineServingLogs/pipeline.log"]) kill_process(9494) self.serving_util.release('service') @@ -59,11 +60,9 @@ def get_truth_val_by_inference(self): output_data = output_handle.copy_to_cpu() output_data_dict[output_data_name] = output_data # convert to the same format of Serving output - print(output_data_dict) output_data_dict["price"] = output_data_dict["fc_0.tmp_1"] del output_data_dict["fc_0.tmp_1"] self.truth_val = output_data_dict - 
-        print(self.truth_val, self.truth_val["price"].shape)

     def predict_brpc(self, batch_size=1):
         data = np.array(
@@ -75,7 +74,6 @@ def predict_brpc(self, batch_size=1):
         fetch_list = client.get_fetch_names()
         fetch_map = client.predict(
             feed={"x": data}, fetch=fetch_list, batch=True)
-        print(fetch_map)
         return fetch_map

     def predict_http(self, batch_size=1):
@@ -88,12 +86,11 @@ def predict_http(self, batch_size=1):
         fetch_list = client.get_fetch_names()
         fetch_map = client.predict(
             feed={"x": data}, fetch=fetch_list, batch=True)
-        print(fetch_map)
         output_dict = self.serving_util.parse_http_result(fetch_map)
         return output_dict

     def test_inference(self):
-        assert self.truth_val['price'].size != 0
+        assert self.truth_val['price'].size != 0, "The result of inference is empty"


     def test_cpu(self):
@@ -104,7 +101,7 @@ def test_cpu(self):
         )

         # 2.resource check
-        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"
+        assert count_process_num_on_port(9494) == 1, "Error occurred when Paddle Server started"

         # 4.predict by brpc
         # batch_size 1
@@ -124,7 +121,7 @@ def test_gpu(self):
         )

         # 2.resource check
-        assert count_process_num_on_port(9494) == 1, "Please check 'Captured stdout teardown' to refer to stderr log"
+        assert count_process_num_on_port(9494) == 1, "Error occurred when Paddle Server started"

         # 4.predict by brpc
         # batch_size 1
diff --git a/python/paddle_serving_server/env_check/test_uci_pipeline.py b/python/paddle_serving_server/env_check/test_uci_pipeline.py
index be7fff936..faa1640ea 100644
--- a/python/paddle_serving_server/env_check/test_uci_pipeline.py
+++ b/python/paddle_serving_server/env_check/test_uci_pipeline.py
@@ -25,8 +25,9 @@ def setup_class(self):

     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
-                   "log/serving.ERROR", "PipelineServingLogs/pipeline.log"], iden="after predict")
+                   "PipelineServingLogs/pipeline.log"], iden="after predict")
         kill_process(9998)
+        kill_process(18082)
         self.serving_util.release('web_service')

     def get_truth_val_by_inference(self):
@@ -64,7 +64,6 @@ def get_truth_val_by_inference(self):
         output_data_dict["prob"] = output_data_dict["fc_0.tmp_1"]
         del output_data_dict["fc_0.tmp_1"]
         self.truth_val = output_data_dict
-        print(self.truth_val, self.truth_val["prob"].shape)

     def predict_pipeline_rpc(self, batch_size=1):
         # 1.prepare feed_data
@@ -75,10 +75,8 @@ def predict_pipeline_rpc(self, batch_size=1):

         # 3.predict for fetch_map
         ret = client.predict(feed_dict=feed_dict)
-        print(ret)
         # 4.convert dict to numpy
         result = {"prob": np.array(eval(ret.value[0]))}
-        print(result)
         return result

     def predict_pipeline_http(self, batch_size=1):
@@ -92,7 +90,6 @@ def predict_pipeline_http(self, batch_size=1):
         # 2.predict for fetch_map
         url = "http://127.0.0.1:18082/uci/prediction"
         r = requests.post(url=url, data=json.dumps(feed_dict))
-        print(r.json())
         # 3.convert dict to numpy array
         result = {"prob": np.array(eval(r.json()["value"][0]))}
         return result
@@ -105,8 +102,8 @@ def test_cpu(self):
         )

         # 2.resource check
-        assert count_process_num_on_port(9998) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC Server
-        assert count_process_num_on_port(18082) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC gateway
+        assert count_process_num_on_port(9998) == 1, "Error occurred when Paddle Server started" # gRPC Server
+        assert count_process_num_on_port(18082) == 1, "Error occurred when Paddle Server started" # gRPC gateway

         # 3.keywords check
         check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")
is enabled", filename="stderr.log") @@ -131,8 +128,8 @@ def test_gpu(self): ) # 2.resource check - assert count_process_num_on_port(9998) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC Server - assert count_process_num_on_port(18082) == 1, "Please check 'Captured stdout teardown' to refer to stderr log" # gRPC gateway + assert count_process_num_on_port(9998) == 1, "Error occured when Paddle Server started" # gRPC Server + assert count_process_num_on_port(18082) == 1, "Error occured when Paddle Server started" # gRPC gateway # 3.predict by rpc result = self.predict_pipeline_rpc(batch_size=1) diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py index a733aecfb..3eed8bc8c 100644 --- a/python/paddle_serving_server/env_check/util.py +++ b/python/paddle_serving_server/env_check/util.py @@ -23,8 +23,8 @@ def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir self.client_config = f"{client_dir}/serving_client_conf.prototxt" os.chdir(self.example_path) - print("======================cur path======================") - print(os.getcwd()) + #print("======================cur path======================") + #print(os.getcwd()) self.check_model_data_exist() def check_model_data_exist(self): @@ -46,7 +46,7 @@ def start_server_by_shell(self, cmd: str, sleep: int = 5, err="stderr.log", out= if wait: p.wait() - print_log([err, out]) + #print_log([err, out]) @staticmethod def check_result(result_data: dict, truth_data: dict, batch_size=1, delta=1e-3): @@ -92,7 +92,7 @@ def kill_process(port, sleep_time=0): def count_process_num_on_port(port): command = "netstat -nlp | grep :" + str(port) + " | wc -l" count = eval(os.popen(command).read()) - print(f"port-{port} processes num:", count) + #print(f"port-{port} processes num:", count) return count @@ -141,7 +141,7 @@ def print_log(file_list, iden=""): os.remove(file) else: print(f"{file} not exist") - print("======================================================") + #print("======================================================") def parse_prototxt(file): diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py index 9e9d87a65..b7c724e27 100755 --- a/python/paddle_serving_server/serve.py +++ b/python/paddle_serving_server/serve.py @@ -479,13 +479,13 @@ class Check_Env_Shell(cmd.Cmd): intro = "Welcome to the check env shell.Type help to list commands.\n" # ----- basic commands ----- def do_help(self, arg): - print("\nCommand list\n"\ - "check_all\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\ - "check_pipeline\tCheck Environment of Pipeline Serving\n"\ - "check_cpp\tCheck Environment of C++ Serving\n"\ - "check_inference\tCheck Environment of Paddle Inference\n"\ - "debug\tOpen pytest log to debug\n"\ - "exit\tExit Check Env Shell\n") + print("\nCommand list\t\tDescription\n"\ + "check_all\t\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\ + "check_pipeline\t\tCheck Environment of Pipeline Serving\n"\ + "check_cpp\t\tCheck Environment of C++ Serving\n"\ + "check_inference\t\tCheck Environment of Paddle Inference\n"\ + "debug\t\t\tWhen checking was failed, open log to debug\n"\ + "exit\t\t\tExit Check Env Shell\n") def do_check_all(self, arg): "Check Environment of Paddle Inference, Pipeline Serving, C++ Serving" From f5a07f205ac2e124e417708634aac8a416066bff Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Thu, 13 Jan 2022 03:30:31 +0000 Subject: [PATCH 
From f5a07f205ac2e124e417708634aac8a416066bff Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 13 Jan 2022 03:30:31 +0000
Subject: [PATCH 09/12] Modify log output

---
 python/paddle_serving_server/env_check/util.py | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py
index 3eed8bc8c..5b1d68a76 100644
--- a/python/paddle_serving_server/env_check/util.py
+++ b/python/paddle_serving_server/env_check/util.py
@@ -23,8 +23,6 @@ def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir
         self.client_config = f"{client_dir}/serving_client_conf.prototxt"

         os.chdir(self.example_path)
-        #print("======================cur path======================")
-        #print(os.getcwd())
         self.check_model_data_exist()

     def check_model_data_exist(self):
@@ -46,7 +44,6 @@ def start_server_by_shell(self, cmd: str, sleep: int = 5, err="stderr.log", out=
         if wait:
             p.wait()

-        #print_log([err, out])

     @staticmethod
     def check_result(result_data: dict, truth_data: dict, batch_size=1, delta=1e-3):
@@ -92,7 +89,6 @@ def kill_process(port, sleep_time=0):
 def count_process_num_on_port(port):
     command = "netstat -nlp | grep :" + str(port) + " | wc -l"
     count = eval(os.popen(command).read())
-    #print(f"port-{port} processes num:", count)
     return count

@@ -140,8 +136,7 @@ def print_log(file_list, iden=""):
         if file.startswith("log") or file.startswith("PipelineServingLogs"):
             os.remove(file)
         else:
-            print(f"{file} not exist")
-    #print("======================================================")
+            pass


 def parse_prototxt(file):

From 8c87e0df4053d0255c1feffe1c394e32ac853b5f Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 13 Jan 2022 04:35:57 +0000
Subject: [PATCH 10/12] Use del os.environ instead of os.unsetenv

---
 python/paddle_serving_server/env_check/run.py               | 2 +-
 python/paddle_serving_server/env_check/test_uci_pipeline.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py
index 9b1ed150a..f7ad73652 100644
--- a/python/paddle_serving_server/env_check/run.py
+++ b/python/paddle_serving_server/env_check/run.py
@@ -53,7 +53,7 @@ def run_test_cases(cases_list, case_type, is_open_std):
             print("{} {} environment running failure, if you need this environment, please refer to https://github.com/PaddlePaddle/Serving/blob/develop/doc/Install_CN.md".format(case_type, case_name))

 def unset_env(key):
-    os.unsetenv(key)
+    del os.environ[key]

 def check_env(mode):

diff --git a/python/paddle_serving_server/env_check/test_uci_pipeline.py b/python/paddle_serving_server/env_check/test_uci_pipeline.py
index faa1640ea..8b115c521 100644
--- a/python/paddle_serving_server/env_check/test_uci_pipeline.py
+++ b/python/paddle_serving_server/env_check/test_uci_pipeline.py
@@ -25,7 +25,7 @@ def setup_class(self):

     def teardown_method(self):
         print_log(["stderr.log", "stdout.log",
-                   "PipelineServingLogs/pipeline.log"], iden="after predict")
+                   "PipelineServingLogs/pipeline.log"])
         kill_process(9998)
         kill_process(18082)
         self.serving_util.release('web_service')
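Note: the one-line change in patch 10 matters because os.unsetenv only clears the variable in the C-level process environment; it does not update the os.environ mapping, so check_env's `'https_proxy' in os.environ` test would keep seeing a stale entry. Deleting the key from os.environ updates the mapping and is automatically translated into a corresponding unsetenv call. A quick illustration (DEMO_VAR is a stand-in name):

```python
import os

os.environ["DEMO_VAR"] = "1"
os.unsetenv("DEMO_VAR")            # clears the process environment only
print("DEMO_VAR" in os.environ)    # True: the mapping still holds the key

del os.environ["DEMO_VAR"]         # removes the key and unsets the variable
print("DEMO_VAR" in os.environ)    # False
```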
From 680e2f142d1b9151d645b755de654c389ee0c6a6 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 13 Jan 2022 11:11:08 +0000
Subject: [PATCH 11/12] Set an environment variable for the log path

---
 python/paddle_serving_server/env_check/run.py             | 7 ++++++-
 python/paddle_serving_server/env_check/test_fit_a_line.py | 1 +
 .../paddle_serving_server/env_check/test_uci_pipeline.py  | 3 ---
 python/pipeline/logger.py                                 | 5 ++++-
 4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py
index f7ad73652..725a22cde 100644
--- a/python/paddle_serving_server/env_check/run.py
+++ b/python/paddle_serving_server/env_check/run.py
@@ -31,6 +31,11 @@
 cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
 pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]

+def set_serving_log_path():
+    if 'SERVING_LOG_PATH' not in os.environ:
+        serving_log_path = os.path.expanduser(os.getcwd())
+        os.environ['SERVING_LOG_PATH']=serving_log_path
+
 def run_test_cases(cases_list, case_type, is_open_std):
     old_stdout, old_stderr = sys.stdout, sys.stderr
     real_path = os.path.dirname(os.path.realpath(__file__))
@@ -56,7 +61,7 @@ def unset_env(key):
     del os.environ[key]

 def check_env(mode):
-
+    set_serving_log_path()
     if 'https_proxy' in os.environ or 'http_proxy' in os.environ:
         unset_env("https_proxy")
         unset_env("http_proxy")
diff --git a/python/paddle_serving_server/env_check/test_fit_a_line.py b/python/paddle_serving_server/env_check/test_fit_a_line.py
index c224d4b77..dcf2ece87 100644
--- a/python/paddle_serving_server/env_check/test_fit_a_line.py
+++ b/python/paddle_serving_server/env_check/test_fit_a_line.py
@@ -90,6 +90,7 @@ def predict_http(self, batch_size=1):
         return output_dict

     def test_inference(self):
+        self.serving_util.start_server_by_shell(cmd="", sleep=1)
         assert self.truth_val['price'].size != 0, "The result of inference is empty"

diff --git a/python/paddle_serving_server/env_check/test_uci_pipeline.py b/python/paddle_serving_server/env_check/test_uci_pipeline.py
index 8b115c521..a48c5cda5 100644
--- a/python/paddle_serving_server/env_check/test_uci_pipeline.py
+++ b/python/paddle_serving_server/env_check/test_uci_pipeline.py
@@ -105,9 +105,6 @@ def test_cpu(self):
         assert count_process_num_on_port(9998) == 1, "Error occurred when Paddle Server started" # gRPC Server
         assert count_process_num_on_port(18082) == 1, "Error occurred when Paddle Server started" # gRPC gateway

-        # 3.keywords check
-        check_keywords_in_server_log("MKLDNN is enabled", filename="stderr.log")
-
         # 4.predict by rpc
         result = self.predict_pipeline_rpc(batch_size=1)
         self.serving_util.check_result(result_data=result, truth_data=self.truth_val, batch_size=1)
diff --git a/python/pipeline/logger.py b/python/pipeline/logger.py
index ebf31564b..24761b104 100644
--- a/python/pipeline/logger.py
+++ b/python/pipeline/logger.py
@@ -24,8 +24,11 @@ def __init__(self, levels):
     def filter(self, logRecord):
         return logRecord.levelno in self._levels

-
 log_dir = "PipelineServingLogs"
+if 'SERVING_LOG_PATH' in os.environ:
+    serving_log_path = os.environ['SERVING_LOG_PATH']
+    log_dir = os.path.join(serving_log_path, log_dir)
+
 if not os.path.exists(log_dir):
     os.makedirs(log_dir)
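Note: patch 11 threads a single SERVING_LOG_PATH environment variable through the pieces that write logs: check_env seeds it with the current working directory when it is unset, and pipeline/logger.py prefixes its PipelineServingLogs directory with it when it is present. The resolution both sides effectively agree on can be summarized like this (the helper name is illustrative, not code from the patch):

```python
import os

def resolve_log_dir(name="PipelineServingLogs"):
    # Env var wins; otherwise fall back to the current working directory,
    # which matches the relative path the logger used before this patch.
    return os.path.join(os.environ.get("SERVING_LOG_PATH", os.getcwd()), name)
```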
From fe0195ee21b00d500d8183d691cd1ba89afb313e Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Fri, 14 Jan 2022 09:50:45 +0000
Subject: [PATCH 12/12] Split logs by test case

---
 python/paddle_serving_server/env_check/run.py  | 16 ++++++++++++++++
 python/paddle_serving_server/env_check/util.py | 15 +++++++++------
 python/paddle_serving_server/serve.py          | 12 ++++++++----
 3 files changed, 33 insertions(+), 10 deletions(-)

diff --git a/python/paddle_serving_server/env_check/run.py b/python/paddle_serving_server/env_check/run.py
index 725a22cde..4268a9873 100644
--- a/python/paddle_serving_server/env_check/run.py
+++ b/python/paddle_serving_server/env_check/run.py
@@ -30,12 +30,24 @@
 inference_test_cases = ["test_fit_a_line.py::TestFitALine::test_inference"]
 cpp_test_cases = ["test_fit_a_line.py::TestFitALine::test_cpu", "test_fit_a_line.py::TestFitALine::test_gpu"]
 pipeline_test_cases = ["test_uci_pipeline.py::TestUCIPipeline::test_cpu", "test_uci_pipeline.py::TestUCIPipeline::test_gpu"]
+log_files = ["PipelineServingLogs", "log", "stderr.log", "stdout.log"]

 def set_serving_log_path():
     if 'SERVING_LOG_PATH' not in os.environ:
         serving_log_path = os.path.expanduser(os.getcwd())
         os.environ['SERVING_LOG_PATH']=serving_log_path

+def mv_log_to_new_dir(dir_path):
+    import shutil
+    if not os.path.exists(dir_path):
+        os.mkdir(dir_path)
+    serving_log_path = os.environ['SERVING_LOG_PATH']
+    for file_name in log_files:
+        file_path = os.path.join(serving_log_path, file_name)
+        if os.path.exists(file_path):
+            shutil.move(file_path, dir_path)
+
+
 def run_test_cases(cases_list, case_type, is_open_std):
     old_stdout, old_stderr = sys.stdout, sys.stderr
     real_path = os.path.dirname(os.path.realpath(__file__))
@@ -48,6 +60,10 @@ def run_test_cases(cases_list, case_type, is_open_std):
         res = pytest.main(args)
         sys.stdout, sys.stderr = old_stdout, old_stderr
         case_name = case.split('_')[-1]
+        serving_log_path = os.environ['SERVING_LOG_PATH']
+        dir_name = str(case_type) + '_' + case.split(':')[-1]
+        new_dir_path = os.path.join(serving_log_path, dir_name)
+        mv_log_to_new_dir(new_dir_path)
         if res == 0:
             print("{} {} environment running success".format(case_type, case_name))
         elif res == 1:
diff --git a/python/paddle_serving_server/env_check/util.py b/python/paddle_serving_server/env_check/util.py
index 5b1d68a76..7add6c0c6 100644
--- a/python/paddle_serving_server/env_check/util.py
+++ b/python/paddle_serving_server/env_check/util.py
@@ -13,6 +13,7 @@ def __init__(self, data_path: str, example_path: str, model_dir: str, client_dir
         DATA_PATH: dataset root directory
         py_version: Python version, 3.6-3.8
         """
+        self.serving_log_path = os.environ['SERVING_LOG_PATH']
         code_path = os.path.dirname(os.path.realpath(__file__))
         self.data_path = f"{code_path}/{data_path}/"
         self.example_path = f"{code_path}/{example_path}/"
@@ -37,6 +38,9 @@ def check_model_data_exist(self):
                 os.system(f"ln -s {abs_path} {file}")

     def start_server_by_shell(self, cmd: str, sleep: int = 5, err="stderr.log", out="stdout.log", wait=False):
+
+        err = os.path.join(self.serving_log_path, err)
+        out = os.path.join(self.serving_log_path, out)
         self.err = open(err, "w")
         self.out = open(out, "w")
         p = subprocess.Popen(cmd, shell=True, stdout=self.out, stderr=self.err)
@@ -128,17 +132,16 @@ def diff_compare(array1, array2):


 def print_log(file_list, iden=""):
+    serving_log_path = os.environ['SERVING_LOG_PATH']
     for file in file_list:
-        print(f"======================{file} {iden}=====================")
-        if os.path.exists(file):
-            with open(file, "r") as f:
+        print(f"======================{file}=====================")
+        file_path = os.path.join(serving_log_path, file)
+        if os.path.exists(file_path):
+            with open(file_path, "r") as f:
                 print(f.read())
-            if file.startswith("log") or file.startswith("PipelineServingLogs"):
-                os.remove(file)
         else:
             pass

-
 def parse_prototxt(file):
     with open(file, "r") as f:
         lines = [i.strip().split(":") for i in f.readlines()]
diff --git a/python/paddle_serving_server/serve.py b/python/paddle_serving_server/serve.py
index b7c724e27..0bdfe7490 100755
--- a/python/paddle_serving_server/serve.py
+++ b/python/paddle_serving_server/serve.py
@@ -480,10 +480,14 @@ class Check_Env_Shell(cmd.Cmd):
     # ----- basic commands -----
     def do_help(self, arg):
         print("\nCommand list\t\tDescription\n"\
-              "check_all\t\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving\n"\
-              "check_pipeline\t\tCheck Environment of Pipeline Serving\n"\
-              "check_cpp\t\tCheck Environment of C++ Serving\n"\
-              "check_inference\t\tCheck Environment of Paddle Inference\n"\
+              "check_all\t\tCheck Environment of Paddle Inference, Pipeline Serving, C++ Serving. "\
+              "If it fails, use the debug command to debug\n"\
+              "check_pipeline\t\tCheck Environment of Pipeline Serving. "\
+              "If it fails, use the debug command to debug\n"\
+              "check_cpp\t\tCheck Environment of C++ Serving. "\
+              "If it fails, use the debug command to debug\n"\
+              "check_inference\t\tCheck Environment of Paddle Inference. "\
+              "If it fails, use the debug command to debug\n"\
               "debug\t\t\tWhen checking fails, open the log to debug\n"\
               "exit\t\t\tExit Check Env Shell\n")
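Note: with patch 12 applied, each pytest case gets its own log directory. After a case runs, run_test_cases builds `dir_name = str(case_type) + '_' + case.split(':')[-1]` under SERVING_LOG_PATH, and mv_log_to_new_dir moves any of the known artifacts (PipelineServingLogs, log, stderr.log, stdout.log) into it. For the case lists defined in run.py, a full check_all run would therefore leave per-case directories along these lines (illustrative):

```python
cases = {
    "PaddlePaddle": ["test_fit_a_line.py::TestFitALine::test_inference"],
    "C++": ["test_fit_a_line.py::TestFitALine::test_cpu",
            "test_fit_a_line.py::TestFitALine::test_gpu"],
    "Pipeline": ["test_uci_pipeline.py::TestUCIPipeline::test_cpu",
                 "test_uci_pipeline.py::TestUCIPipeline::test_gpu"],
}
for case_type, case_list in cases.items():
    for case in case_list:
        # Same expression as run_test_cases uses for the directory name.
        print(case_type + '_' + case.split(':')[-1])
# PaddlePaddle_test_inference, C++_test_cpu, C++_test_gpu,
# Pipeline_test_cpu, Pipeline_test_gpu
```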