@HemachandranD
Last active March 8, 2024 18:31
# Databricks notebook source
import os
import sys
import requests
import json
# Workspace directory of the current notebook (not used in the rest of this notebook)
notebook_path = '/Workspace/' + os.path.dirname(dbutils.notebook.entry_point.getDbutils().notebook().getContext().notebookPath().get())
# COMMAND ----------
# Name of the model serving endpoint (reuses the model name produced by the "Train" task)
endpoint_name = dbutils.jobs.taskValues.get("Train", "model_name", debugValue="")
# Name of the registered MLflow model
model_name = dbutils.jobs.taskValues.get("Train", "model_name", debugValue="")
# Version of the registered MLflow model produced by the "Train" task
model_version = dbutils.jobs.taskValues.get("Train", "model_version", debugValue="")
# Specify the type of compute (CPU, GPU_SMALL, GPU_MEDIUM, etc.)
workload_type = "CPU"
# Specify the scale-out size of compute (Small, Medium, Large, etc.)
workload_size = "Small"
# Get the API endpoint and token for the current notebook context
API_ROOT = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiUrl().get()
API_TOKEN = dbutils.notebook.entry_point.getDbutils().notebook().getContext().apiToken().get()
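# The task values above fall back to empty strings when this notebook runs outside the
# "Train" job task. The guard below is a minimal sketch (not part of the original gist)
# that fails fast in that case instead of sending an invalid payload to the serving API.
if not model_name or not model_version:
    raise ValueError(
        "model_name/model_version task values are empty; run this notebook as a job task "
        "downstream of 'Train' or supply debug values."
    )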
# COMMAND ----------
data = {
    "name": endpoint_name,
    "config": {
        "served_entities": [
            {
                "name": f"{model_name}-{model_version}",
                "entity_name": model_name,
                "entity_version": model_version,
                "workload_type": workload_type,
                "workload_size": workload_size,
                "scale_to_zero_enabled": True,
            }
        ]
    },
    "tags": [
        {
            "key": "team",
            "value": "MLOps"
        }
    ],
}
headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_TOKEN}"}
response = requests.post(
url=f"{API_ROOT}/api/2.0/serving-endpoints", json=data, headers=headers
)
print(json.dumps(response.json(), indent=4))
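# COMMAND ----------
# Endpoint creation is asynchronous, so the POST above returns before the served model is
# usable. The cell below is a minimal sketch (not in the original gist) that polls the
# GET /api/2.0/serving-endpoints/{name} API until the endpoint reports a ready state; the
# polling interval, timeout, and the "state"/"ready" fields checked here are assumptions
# based on the public Serving Endpoints REST API and may need adjusting.
import time

deadline = time.time() + 20 * 60  # allow up to 20 minutes for the endpoint to come up
while time.time() < deadline:
    status = requests.get(
        url=f"{API_ROOT}/api/2.0/serving-endpoints/{endpoint_name}", headers=headers
    ).json()
    ready = status.get("state", {}).get("ready")
    print(f"Endpoint '{endpoint_name}' ready state: {ready}")
    if ready == "READY":
        break
    time.sleep(60)
else:
    raise TimeoutError(f"Serving endpoint '{endpoint_name}' was not ready in time.")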