Update Tabular Model Descriptions from Azure OpenAI with rate limiting logic
#r "System.Net.Http" | |
using System.Net.Http; | |
using System.Text; | |
using Newtonsoft.Json.Linq; | |
// you need to create a resource and deploy a model in Azure OpenAI | |
// to get the following 3 pieces of information | |
// see https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal | |
const string apiKey = "<your API key"; | |
const string resource_name = "<your resource name>"; | |
const string deployment_name = "<your deployment name>"; | |
const string openai_api_version = "2023-03-15-preview"; | |
string uri = string.Format("https://{0}.openai.azure.com/openai/deployments/{1}/completions?api-version={2}", resource_name, deployment_name, openai_api_version); | |
const string question = "Explain the following calculation in a few sentences in simple business terms without using DAX function names:\n\n"; | |
const int oneMinute = 60000; // the number of milliseconds in a minute | |
const int apiLimit = 300; // a free account is limited to 20 calls per minute, change this if you have a paid account | |
const bool dontOverwrite = true; // this prevents existing descriptions from being overwritten | |
using (var client = new HttpClient()) { | |
client.DefaultRequestHeaders.Clear(); | |
client.DefaultRequestHeaders.Add("api-key", apiKey); | |
int callCount = 0; | |
// if any measures are currently selected add those | |
// to our collection | |
List<Measure> myMeasures = new List<Measure>(); | |
myMeasures.AddRange( Selected.Measures ); | |
// if no measures were selected grab all of the | |
// measures in the model | |
if ( myMeasures.Count == 0) | |
{ | |
myMeasures.AddRange(Model.Tables.Where(t => t.Measures.Count() > 0).SelectMany(t => t.Measures)); | |
} | |
foreach ( var m in myMeasures) | |
{ | |
// if we are not overwriting existing descriptions then skip to the | |
// next measure if this one is not an empty string | |
if (dontOverwrite && !string.IsNullOrEmpty(m.Description)) {continue; } | |
// Only uncomment the following when running from the command line or the script will | |
// show a popup after each measure | |
//Info("Processing " + m.DaxObjectFullName) | |
//var body = new requestBody() { prompt = question + m.Expression }; | |
var body = | |
"{ \"prompt\": " + JsonConvert.SerializeObject(question + m.Expression ) + | |
",\"model\": \"text-davinci-003\" " + | |
",\"temperature\": 1 " + | |
",\"max_tokens\": 2048 " + | |
"}"; | |
var res = client.PostAsync(uri, new StringContent(body, Encoding.UTF8,"application/json")); | |
res.Result.EnsureSuccessStatusCode(); | |
var result = res.Result.Content.ReadAsStringAsync().Result; | |
var obj = JObject.Parse(result); | |
var desc = obj["choices"][0]["text"].ToString().Trim(); | |
m.Description = desc + "\n=====\n" + m.Expression; | |
callCount++; // increment the call count | |
if ( callCount % apiLimit == 0) System.Threading.Thread.Sleep( oneMinute ); | |
} | |
} |
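Note that the only rate limiting here is the counter at the bottom of the loop: the script sleeps for a minute after every apiLimit calls. If the service still returns HTTP 429 (too many requests), the EnsureSuccessStatusCode() call throws and the run stops. A minimal sketch of an alternative, not part of the original gist - the postWithRetry delegate, the retry count and the back-off interval are all my own assumptions, and it reuses the client, uri and oneMinute variables defined above:

// Hypothetical retry wrapper - assumes the client, uri and oneMinute variables from the script above.
// Retries up to 3 times when the service answers 429, sleeping a minute between attempts;
// any other status code is returned unchanged for EnsureSuccessStatusCode() to deal with.
int maxRetries = 3;
Func<string, HttpResponseMessage> postWithRetry = (requestBody) =>
{
    HttpResponseMessage response = null;
    for (int attempt = 0; attempt <= maxRetries; attempt++)
    {
        response = client.PostAsync(uri, new StringContent(requestBody, Encoding.UTF8, "application/json")).Result;
        if ((int)response.StatusCode != 429) break;
        System.Threading.Thread.Sleep(oneMinute); // back off before retrying
    }
    return response;
};

// usage inside the loop, in place of the PostAsync / EnsureSuccessStatusCode pair:
// var response = postWithRetry(body);
// response.EnsureSuccessStatusCode();
// var result = response.Content.ReadAsStringAsync().Result;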
@dgosbell is there a version of this that uses gpt-35-turbo with system messages, etc.?

You can change the model in the body of the request to use gpt-35-turbo if you have that deployed. I'm not sure what you mean by "with system messages, etc.", but if these are additional request options you should also be able to set them through the request body.
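For anyone wanting to try that, a rough sketch of what the change might look like against the chat completions endpoint is below. It is not taken from the gist: the endpoint path, the system message wording and the response shape are assumptions and should be checked against the current Azure OpenAI documentation for your API version.

// Hedged sketch only - point deployment_name at a gpt-35-turbo deployment and verify that the api-version supports chat completions.
string chatUri = string.Format("https://{0}.openai.azure.com/openai/deployments/{1}/chat/completions?api-version={2}", resource_name, deployment_name, openai_api_version);

var chatBody =
    "{ \"messages\": [" +
    " { \"role\": \"system\", \"content\": \"You are a helpful assistant that explains DAX calculations in plain business terms.\" }," +
    " { \"role\": \"user\", \"content\": " + JsonConvert.SerializeObject(question + m.Expression) + " }" +
    " ]" +
    ",\"temperature\": 1" +
    ",\"max_tokens\": 2048" +
    "}";

// the chat API returns the generated text under choices[0].message.content rather than choices[0].text
// var desc = obj["choices"][0]["message"]["content"].ToString().Trim();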