Skip to content

Instantly share code, notes, and snippets.

@lordlinus
Created August 17, 2021 03:27
Show Gist options
  • Save lordlinus/a2c05c1a66765211b6df31bee30831cd to your computer and use it in GitHub Desktop.
Save lordlinus/a2c05c1a66765211b6df31bee30831cd to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{"cells":[{"cell_type":"code","source":["%pip install lightgbm\n%pip install azureml-sdk[databricks]\n%pip install azureml-mlflow"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"2cbeaad5-6172-4b32-9776-b5a1ba19c769"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["import databricks.koalas as ks\nimport pandas as pd\nimport numpy as np\nimport lightgbm as lgb\nimport mlflow\nimport mlflow.azureml\nfrom azureml.core import Workspace\nfrom azureml.core.authentication import InteractiveLoginAuthentication\nfrom azureml.core.compute import AksCompute, ComputeTarget\nfrom azureml.core.webservice import AksWebservice\nfrom random import randint\n\n\ninteractive_auth = InteractiveLoginAuthentication(tenant_id=\"xxxxxxxxxxxx\")\nsubscription_id = \"xxxxxxxxxxx\" #you should be owner or contributor\nresource_group = \"xxxxxx\" #Resource group name. NOTE: you should be owner or contributor\nworkspace_name = \"xxxxx\" #AzureML workspace name\nworkspace_region = \"xxxxx\" #your region (if workspace need to be created)\naks_compute_name = \"xxxxx\" #Name of the AKS cluster to deploy the model\nexperiment_name = \"lightgbm-example\" # Cab be any name and will be displayed in the Azure ML UI\n\nworkspace = Workspace.get(name = workspace_name,\n location = workspace_region,\n resource_group = resource_group,\n subscription_id = subscription_id,\n auth=interactive_auth)\n\nmlflow.set_tracking_uri(workspace.get_mlflow_tracking_uri())\nmlflow.set_experiment(experiment_name)\naks_target = AksCompute(workspace,aks_compute_name)"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"e63a69ba-51ac-4974-a196-e12ce0a7b50b"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["csv_file_path = '/dbfs/FileStore/data/Breast_cancer_data.csv'\ndbfs_csv_file_path = '/FileStore/data/Breast_cancer_data.csv'"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"5c1a1931-4b1e-4a1e-bb24-7bdf73dee7d7"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["df = pd.read_csv(csv_file_path)\n# df = ks.read_csv(dbfs_csv_file_path) # Read data in Koalas to perform parallel transformation\ndf.head()\ndf.info()"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"faa0142f-7e3f-4557-bca4-fcc9441d5aed"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["import time\n\nimport lightgbm as lgb\n\nfrom sklearn.metrics import log_loss, accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\n\n# define functions\ndef preprocess_data(df):\n X = df[['mean_radius','mean_texture','mean_perimeter','mean_area','mean_smoothness']]\n y = df['diagnosis']\n\n enc = LabelEncoder()\n y = enc.fit_transform(y)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=16\n )\n\n return X_train, X_test, y_train, y_test, enc\n\n\ndef train_model(params, num_boost_round, X_train, X_test, y_train, y_test):\n t1 = time.time()\n train_data = lgb.Dataset(X_train, label=y_train)\n test_data = lgb.Dataset(X_test, label=y_test)\n model = lgb.train(\n params,\n train_data,\n num_boost_round=num_boost_round,\n valid_sets=[test_data],\n valid_names=[\"test\"],\n )\n t2 = time.time()\n\n return model, t2 - t1\n\n\ndef evaluate_model(model, X_test, y_test):\n y_proba = model.predict(X_test)\n y_pred = y_proba.argmax(axis=1)\n loss = log_loss(y_test, y_proba)\n acc = accuracy_score(y_test, y_pred)\n\n return loss, acc"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"86fb076f-c1b2-4871-9b3c-1e64a597641f"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["from sklearn.metrics import accuracy_score, log_loss\n\n# preprocess data\nX_train, X_test, y_train, y_test, enc = preprocess_data(df)\n\n# set training parameters\nparams = {\n \"objective\": \"multiclass\",\n \"num_class\": 2,\n \"learning_rate\": 0.1,\n \"metric\": \"multi_logloss\",\n \"colsample_bytree\": 1.0,\n \"subsample\": 1.0,\n \"seed\": 16,\n}\n\nnum_boost_round = 32\n\nwith mlflow.start_run() as run:\n # enable automatic logging\n mlflow.lightgbm.autolog()\n\n # train model\n model, train_time = train_model(\n params, num_boost_round, X_train, X_test, y_train, y_test\n )\n mlflow.log_metric(\"training_time\", train_time)\n\n # evaluate model\n loss, acc = evaluate_model(model, X_test, y_test)\n mlflow.log_metrics({\"loss\": loss, \"accuracy\": acc})"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"c4c39b9c-17d4-408d-8393-4b94bcf05ac2"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"markdown","source":["# Option 1: Without saving the files or creating the score.py file. Deploy the latest trained model based on the run_id"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"39001ba6-d375-4f3d-825a-5af24c567309"}}},{"cell_type":"code","source":["# get latest completed run of the training\nruns_df = mlflow.search_runs()\nruns_df = runs_df.loc[runs_df[\"status\"] == \"FINISHED\"]\nruns_df = runs_df.sort_values(by=\"end_time\", ascending=False)\nprint(runs_df.head())\nrun_id = runs_df.at[0, \"run_id\"]\n\naks_deploy_config = AksWebservice.deploy_configuration(\n compute_target_name=aks_compute_name,\n cpu_cores=1,\n memory_gb=1,\n tags={\"data\": \"kaggle\", \"method\": \"lightgbm\"},\n description=\"Sample LightGBM using kaggle data\",\n)\n\n# Note: This will create seperate service eveytime you execute, keep service_name same to update existing deployment\nwebservice, azure_model = mlflow.azureml.deploy(\n model_uri=f\"runs:/{run_id}/model\",\n workspace=workspace,\n deployment_config=aks_deploy_config,\n service_name=\"sample-lightgbm-\" + str(randint(10000, 99999)), \n model_name=experiment_name,\n synchronous=False)"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"a5f2d9bc-7994-4a77-9998-140043384c44"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["webservice.wait_for_deployment(show_output=True)\nwebservice.scoring_uri"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"c2fa0fca-9504-484f-bfc0-248f85c4d704"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"markdown","source":["# Option 2: Manually export the model and deploy with custom score.py"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"cdb1195a-ce34-4b66-8ab6-a2e67f314a9f"}}},{"cell_type":"code","source":["import joblib\nfrom azureml.core import Model\nfrom azureml.core.resource_configuration import ResourceConfiguration\n\njoblib.dump(model,'model_pkl_file.pkl')\nazureml_model = Model.register(workspace=workspace,\n model_name='my-lightgbm-model', # Name of the registered model in your workspace.\n model_path='./model_pkl_file.pkl', # Local file to upload and register as a model.\n model_framework=Model.Framework.CUSTOM, # Framework used to create the model.\n model_framework_version=lgb.__version__, # Version of lightgbm used to create the model.\n resource_configuration=ResourceConfiguration(cpu=1, memory_in_gb=0.5),\n description='Sample Model',\n tags={'area': 'azureml', 'type': 'databricks notebook'})\n\nprint('Name:', azureml_model.name)\nprint('Version:', azureml_model.version)"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"51a97b19-83a9-46e5-a599-0f5bca8e0806"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["score_py = \"\"\"import joblib\nimport numpy as np\nimport os\n\nfrom inference_schema.schema_decorators import input_schema, output_schema\nfrom inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n\n\n# The init() method is called once, when the web service starts up.\n#\n# Typically you would deserialize the model file, as shown here using joblib,\n# and store it in a global variable so your run() method can access it later.\ndef init():\n global model\n\n # The AZUREML_MODEL_DIR environment variable indicates\n # a directory containing the model file you registered.\n model_filename = 'model.pkl'\n model_path = os.path.join(os.environ['AZUREML_MODEL_DIR'],'model',model_filename)\n model = joblib.load(model_path)\n\n\n# The run() method is called each time a request is made to the scoring API.\n#\n# Shown here are the optional input_schema and output_schema decorators\n# from the inference-schema pip package. Using these decorators on your\n# run() method parses and validates the incoming payload against\n# the example input you provide here. This will also generate a Swagger\n# API document for your web service.\n@input_schema('data', NumpyParameterType(np.array([[0.1, 1.2, 2.3, 3.4, 4.5]])))\n@output_schema(NumpyParameterType(np.array([0.1,1.2])))\ndef run(data):\n # Use the model object loaded by init().\n result = model.predict(data)\n\n # You can return any JSON-serializable object.\n return result.tolist()\n\"\"\"\n\nwith open('score.py','w') as f:\n f.write(score_py)"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"d82a3582-0935-4d10-8549-ee93a24bfee4"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["from azureml.core.model import InferenceConfig\nfrom azureml.core.webservice import Webservice, AksWebservice\nfrom azureml.core.environment import Environment,CondaDependencies\nfrom azureml.core.model import InferenceConfig, Model\nfrom azureml.core.webservice.aks import AksServiceDeploymentConfiguration\n\n# Choosing AzureML-Minimal and customizing as required ( see the last cell to display available environments)\nenv = Environment.get(workspace=workspace,name=\"AzureML-lightgbm-3.2-ubuntu18.04-py37-cpu\")\ncurated_clone = env.clone(\"customize_curated\")\nenv.inferencing_stack_version=\"latest\"\n\nconda_dep_pkgs=['joblib']\npip_pkgs=['azureml-defaults', 'inference-schema']\nconda_dep = CondaDependencies()\n\n# Install additional packages as required\nfor conda_dep_pkg in conda_dep_pkgs:\n conda_dep.add_conda_package(conda_package=conda_dep_pkg)\n\nfor pip_pkg in pip_pkgs:\n conda_dep.add_pip_package(pip_package=pip_pkg)\n\ncurated_clone.python.conda_dependencies=conda_dep\n\nprod_webservice_name = \"lightgbm-model-prod\"\nprod_webservice_deployment_config = AksWebservice.deploy_configuration()\n\n# NOTE: score.py is created in the previous cell and save to driver local path\ninference_config = InferenceConfig(entry_script='score.py', environment=curated_clone)\n\nservice = Model.deploy(workspace=workspace,\n name=prod_webservice_name,\n models=[azureml_model],\n inference_config=inference_config,\n deployment_config=prod_webservice_deployment_config,\n deployment_target = aks_target,\n overwrite=True)\n"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"dc557152-1579-4642-a077-cdbde9c29f5d"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["print(service.get_logs())"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"8d3c17e6-a8ce-426e-816d-a7a13df3cd57"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0},{"cell_type":"code","source":["model.predict(np.array([[0.1, 1.2, 2.3, 3.4, 4.5]]))"],"metadata":{"application/vnd.databricks.v1+cell":{"title":"","showTitle":false,"inputWidgets":{},"nuid":"8c385f9f-967e-496c-b087-04bffc00d204"}},"outputs":[{"output_type":"display_data","metadata":{"application/vnd.databricks.v1+output":{"data":"","errorSummary":"","metadata":{},"errorTraceType":null,"type":"ipynbError","arguments":{}}},"output_type":"display_data","data":{"text/html":["<style scoped>\n .ansiout {\n display: block;\n unicode-bidi: embed;\n white-space: pre-wrap;\n word-wrap: break-word;\n word-break: break-all;\n font-family: \"Source Code Pro\", \"Menlo\", monospace;;\n font-size: 13px;\n color: #555;\n margin-left: 4px;\n line-height: 19px;\n }\n</style>"]}}],"execution_count":0}],"metadata":{"application/vnd.databricks.v1+notebook":{"notebookName":"azureml-lightgbm","dashboards":[],"notebookMetadata":{"pythonIndentUnit":2},"language":"python","widgets":{},"notebookOrigID":2263252723270257}},"nbformat":4,"nbformat_minor":0}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment