diff --git a/Schema Decorator Webservice Example.ipynb b/Schema Decorator Webservice Example.ipynb new file mode 100644 index 0000000..2edba8b --- /dev/null +++ b/Schema Decorator Webservice Example.ipynb @@ -0,0 +1,421 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Retrieve the Workspace and a Previously Registered Scikit Learn Model" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found the config file in: F:\\temp\\schemaTest\\aml_config\\config.json\n", + "trangevimaster\tcentraluseuap\ttrangevi\n", + "Wall time: 323 ms\n" + ] + } + ], + "source": [ + "%%time\n", + "from azureml.core import Workspace, Run\n", + "\n", + "ws = Workspace.from_config(path='..\\\\aml_config\\\\config.json')\n", + "print(ws.name, ws.location, ws.resource_group, sep = '\\t')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "from azureml.core.model import Model\n", + "\n", + "model = Model(ws, 'Salary_Predictor')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Sample Scoring and Environment Files are Below" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import sklearn\n", + "from azureml.core.model import Model\n", + "from sklearn.externals import joblib\n", + "\n", + "import numpy as np\n", + "from inference_schema.schema_decorators import input_schema, output_schema\n", + "from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n", + "\n", + "\n", + "input_sample = np.array([[1.8]])\n", + "output_sample = np.array([43638.88864165144])\n", + "\n", + "\n", + "def init():\n", + " global model\n", + "\n", + " model_file = Model.get_model_path('Salary_Predictor')\n", + " with open(model_file, 'rb') as f:\n", + " model = joblib.load(f)\n", + "\n", + "\n", + "@input_schema('data', NumpyParameterType(input_sample))\n", + "@output_schema(NumpyParameterType(output_sample))\n", + "def run(data):\n", + " try:\n", + " res = model.predict(data)\n", + " return res.tolist()\n", + " except Exception as e:\n", + " return str(e)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Writing myenv.yml\n" + ] + } + ], + "source": [ + "%%writefile myenv.yml\n", + "name: project_environment\n", + "dependencies:\n", + " # The python interpreter version.\n", + " # Currently Azure ML only supports 3.5.2 and later.\n", + " \n", + " - python=3.6.2\n", + " - pip:\n", + " # Required packages for AzureML execution, history, and data preparation.\n", + " - azureml-defaults\n", + " - scikit-learn\n", + " - inference-schema[numpy-support]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 
Deploy the Model to ACI" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Image creation operation finished for image salary-predictor:4, operation \"Succeeded\"\n", + "Creating service\n", + "Running...............\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "http://52.180.92.69:80/score\n", + "http://52.180.92.69:80/swagger.json\n", + "Wall time: 3min 35s\n" + ] + } + ], + "source": [ + "%%time\n", + "from azureml.core.webservice import AciWebservice\n", + "from azureml.core.webservice import Webservice\n", + "\n", + "from azureml.core.image import ContainerImage\n", + "\n", + "aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,\n", + " memory_gb=2,\n", + " tags={\"data\": \"Salary-Data\", \"method\":\"sklearn\"},\n", + " description='Sample Model for salary predictions')\n", + "\n", + "image_config = ContainerImage.image_configuration(execution_script=\"./score.py\",\n", + " runtime=\"python\",\n", + " conda_file=\"./myenv.yml\")\n", + "\n", + "service = Webservice.deploy_from_model(workspace=ws,\n", + " name='salary-predictor',\n", + " deployment_config=aciconfig,\n", + " models=[model],\n", + " image_config=image_config)\n", + "\n", + "service.wait_for_deployment(show_output=True)\n", + "print(service.scoring_uri)\n", + "print(service.swagger_uri)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Swagger is Generated Based on the Provided Decorator" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"swagger\": \"2.0\",\n", + " \"info\": {\n", + " \"title\": \"salary-predictor\",\n", + " \"description\": \"API specification for the Azure Machine Learning service salary-predictor\",\n", + " \"version\": \"1.0\"\n", + " },\n", + " \"schemes\": [\n", + " \"https\"\n", + " ],\n", + " \"consumes\": [\n", + " \"application/json\"\n", + " ],\n", + " \"produces\": [\n", + " \"application/json\"\n", + " ],\n", + " \"securityDefinitions\": {\n", + " \"Bearer\": {\n", + " \"type\": \"apiKey\",\n", + " \"name\": \"Authorization\",\n", + " \"in\": \"header\",\n", + " \"description\": \"For example: Bearer abc123\"\n", + " }\n", + " },\n", + " \"paths\": {\n", + " \"/\": {\n", + " \"get\": {\n", + " \"operationId\": \"ServiceHealthCheck\",\n", + " \"description\": \"Simple health check endpoint to ensure the service is up at any given point.\",\n", + " \"responses\": {\n", + " \"200\": {\n", + " \"description\": \"If service is up and running, this response will be returned with the content 'Healthy'\",\n", + " \"schema\": {\n", + " \"type\": \"string\"\n", + " },\n", + " \"examples\": {\n", + " \"application/json\": \"Healthy\"\n", + " }\n", + " },\n", + " \"default\": {\n", + " \"description\": \"The service failed to execute due to an error.\",\n", + " \"schema\": {\n", + " \"$ref\": \"#/definitions/ErrorResponse\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"/score\": {\n", + " \"post\": {\n", + " \"operationId\": \"RunMLService\",\n", + " \"description\": \"Run web service's model and get the prediction output\",\n", + " \"security\": [\n", + " {\n", + " \"Bearer\": []\n", + " }\n", + " ],\n", + " \"parameters\": [\n", + " {\n", + " \"name\": \"serviceInputPayload\",\n", + " \"in\": \"body\",\n", + " \"description\": \"The 
input payload for executing the real-time machine learning service.\",\n", + " \"schema\": {\n", + " \"$ref\": \"#/definitions/ServiceInput\"\n", + " }\n", + " }\n", + " ],\n", + " \"responses\": {\n", + " \"200\": {\n", + " \"description\": \"The service processed the input correctly and provided a result prediction, if applicable.\",\n", + " \"schema\": {\n", + " \"$ref\": \"#/definitions/ServiceOutput\"\n", + " }\n", + " },\n", + " \"default\": {\n", + " \"description\": \"The service failed to execute due to an error.\",\n", + " \"schema\": {\n", + " \"$ref\": \"#/definitions/ErrorResponse\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"definitions\": {\n", + " \"ServiceInput\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"data\": {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"number\",\n", + " \"format\": \"double\"\n", + " }\n", + " }\n", + " }\n", + " },\n", + " \"example\": {\n", + " \"data\": [\n", + " [\n", + " 1.8\n", + " ]\n", + " ]\n", + " }\n", + " },\n", + " \"ServiceOutput\": {\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"number\",\n", + " \"format\": \"double\"\n", + " },\n", + " \"example\": [\n", + " 43638.88864165144\n", + " ]\n", + " },\n", + " \"ErrorResponse\": {\n", + " \"type\": \"object\",\n", + " \"properties\": {\n", + " \"status_code\": {\n", + " \"type\": \"integer\",\n", + " \"format\": \"int32\"\n", + " },\n", + " \"message\": {\n", + " \"type\": \"string\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + "}\n", + "Wall time: 52 ms\n" + ] + } + ], + "source": [ + "%%time\n", + "import json\n", + "import requests\n", + "from pprint import pprint\n", + "\n", + "headers = {'Content-Type': 'application/json'}\n", + "print(json.dumps(requests.get(service.swagger_uri, headers=headers).json(), indent=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Run the Service" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[43638.88864165]\n", + "Wall time: 51.9 ms\n" + ] + } + ], + "source": [ + "%%time\n", + "import json\n", + "import numpy as np\n", + "\n", + "run_input = {'data': [[1.8]]}\n", + "\n", + "result = service.run(json.dumps(run_input))\n", + "\n", + "print(np.array(result))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. Clean Up" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "service.delete()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "inference_schema_dev_env", + "language": "python", + "name": "inference_schema_dev_env" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.1" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}
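The decorator-generated swagger above describes a plain HTTP contract for the /score route, so the deployed service can also be exercised with requests rather than service.run. A minimal sketch, assuming the service object from the deployment cell is still in scope and that key-based auth is left at the ACI default (disabled); the payload shape comes from the @input_schema('data', NumpyParameterType(...)) decorator in score.py:

import json
import requests

# Scoring endpoint printed by the deployment cell, e.g. http://<aci-ip>:80/score
scoring_uri = service.scoring_uri

headers = {'Content-Type': 'application/json'}
# If key-based auth were enabled on the service, the swagger's Bearer security
# definition would apply instead, e.g.:
# headers['Authorization'] = 'Bearer ' + service.get_keys()[0]

# The 'data' key and the nested float array mirror the ServiceInput definition
payload = json.dumps({'data': [[1.8]]})

response = requests.post(scoring_uri, data=payload, headers=headers)
print(response.status_code)  # 200 on success
print(response.json())       # e.g. [43638.88864165144]

This is the same /score operation that service.run exercises through the SDK in section 5; posting to it directly is how clients outside the notebook would consume the model and its published schema.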