From 364a5c66a62f1d7c543f68b2d4de5d119c9327ba Mon Sep 17 00:00:00 2001
From: vishal
Date: Thu, 19 Sep 2019 20:34:38 +0000
Subject: [PATCH 1/2] Add additional information about request handlers

---
 pkg/workloads/cortex/onnx_serve/api.py | 19 +++++++++++++++++++
 pkg/workloads/cortex/tf_api/api.py     | 22 +++++++++++++++++++++-
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/pkg/workloads/cortex/onnx_serve/api.py b/pkg/workloads/cortex/onnx_serve/api.py
index fbfd940650..ace77acc34 100644
--- a/pkg/workloads/cortex/onnx_serve/api.py
+++ b/pkg/workloads/cortex/onnx_serve/api.py
@@ -248,6 +248,25 @@ def start(args):
         local_cache["request_handler"] = ctx.get_request_handler_impl(
             api["name"], args.project_dir
         )
+        request_handler = local_cache.get("request_handler")
+
+        if request_handler is not None and util.has_function(request_handler, "pre_inference"):
+            logger.info(
+                "registered pre_inference request handler provided in {}".format(
+                    api["request_handler"]
+                )
+            )
+        else:
+            logger.info("pre_inference request handler not registered")
+
+        if request_handler is not None and util.has_function(request_handler, "post_inference"):
+            logger.info(
+                "registered post_inference request handler provided in {}".format(
+                    api["request_handler"]
+                )
+            )
+        else:
+            logger.info("post_inference request handler not registered")
 
         sess = rt.InferenceSession(model_path)
         local_cache["sess"] = sess
diff --git a/pkg/workloads/cortex/tf_api/api.py b/pkg/workloads/cortex/tf_api/api.py
index cff2bb543e..579e22b0ee 100644
--- a/pkg/workloads/cortex/tf_api/api.py
+++ b/pkg/workloads/cortex/tf_api/api.py
@@ -254,7 +254,7 @@ def predict(deployment_name, api_name):
 
 
 def extract_signature(signature_def, signature_key):
-    logger.info("signature defs found in model: {}".format(signature_def))
+    logger.info("signature defs found in model: {}".format(list(signature_def.keys())))
 
     available_keys = list(signature_def.keys())
     if len(available_keys) == 0:
@@ -377,6 +377,26 @@ def start(args):
         local_cache["request_handler"] = ctx.get_request_handler_impl(
             api["name"], args.project_dir
         )
+        request_handler = local_cache.get("request_handler")
+
+        if request_handler is not None and util.has_function(request_handler, "pre_inference"):
+            logger.info(
+                "registered pre_inference request handler provided in {}".format(
+                    api["request_handler"]
+                )
+            )
+        else:
+            logger.info("pre_inference request handler not registered")
+
+        if request_handler is not None and util.has_function(request_handler, "post_inference"):
+            logger.info(
+                "registered post_inference request handler provided in {}".format(
+                    api["request_handler"]
+                )
+            )
+        else:
+            logger.info("post_inference request handler not registered")
+
     except Exception as e:
         logger.exception("failed to start api")
         sys.exit(1)

From 2ae1296a6cd13d778e7b4783af717a83cb5719b3 Mon Sep 17 00:00:00 2001
From: vishal
Date: Thu, 19 Sep 2019 16:56:41 -0400
Subject: [PATCH 2/2] Reword info messages

---
 pkg/workloads/cortex/onnx_serve/api.py | 12 ++++--------
 pkg/workloads/cortex/tf_api/api.py     | 14 +++++---------
 2 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/pkg/workloads/cortex/onnx_serve/api.py b/pkg/workloads/cortex/onnx_serve/api.py
index ace77acc34..59c36e7165 100644
--- a/pkg/workloads/cortex/onnx_serve/api.py
+++ b/pkg/workloads/cortex/onnx_serve/api.py
@@ -252,21 +252,17 @@ def start(args):
 
         if request_handler is not None and util.has_function(request_handler, "pre_inference"):
             logger.info(
-                "registered pre_inference request handler provided in {}".format(
-                    api["request_handler"]
-                )
+                "using pre_inference request handler provided in {}".format(api["request_handler"])
             )
         else:
-            logger.info("pre_inference request handler not registered")
+            logger.info("pre_inference request handler not found")
 
         if request_handler is not None and util.has_function(request_handler, "post_inference"):
             logger.info(
-                "registered post_inference request handler provided in {}".format(
-                    api["request_handler"]
-                )
+                "using post_inference request handler provided in {}".format(api["request_handler"])
             )
         else:
-            logger.info("post_inference request handler not registered")
+            logger.info("post_inference request handler not found")
 
         sess = rt.InferenceSession(model_path)
         local_cache["sess"] = sess
diff --git a/pkg/workloads/cortex/tf_api/api.py b/pkg/workloads/cortex/tf_api/api.py
index 579e22b0ee..c7b6997d61 100644
--- a/pkg/workloads/cortex/tf_api/api.py
+++ b/pkg/workloads/cortex/tf_api/api.py
@@ -254,7 +254,7 @@ def predict(deployment_name, api_name):
 
 
 def extract_signature(signature_def, signature_key):
-    logger.info("signature defs found in model: {}".format(list(signature_def.keys())))
+    logger.info("signature defs found in model: {}".format(signature_def))
 
     available_keys = list(signature_def.keys())
    if len(available_keys) == 0:
@@ -381,21 +381,17 @@ def start(args):
 
         if request_handler is not None and util.has_function(request_handler, "pre_inference"):
             logger.info(
-                "registered pre_inference request handler provided in {}".format(
-                    api["request_handler"]
-                )
+                "using pre_inference request handler provided in {}".format(api["request_handler"])
             )
         else:
-            logger.info("pre_inference request handler not registered")
+            logger.info("pre_inference request handler not found")
 
         if request_handler is not None and util.has_function(request_handler, "post_inference"):
             logger.info(
-                "registered post_inference request handler provided in {}".format(
-                    api["request_handler"]
-                )
+                "using post_inference request handler provided in {}".format(api["request_handler"])
             )
         else:
-            logger.info("post_inference request handler not registered")
+            logger.info("post_inference request handler not found")
 
     except Exception as e:
         logger.exception("failed to start api")