- Engine: text-davinci-003 (also works with text-davinci-002, but may need more explicit instructions to return valid JSON)
- Temperature: 0.7
You must extract the following information from the phone conversation below:
{
  "@odata.type": "#Microsoft.Skills.Text.V3.SentimentSkill",
  "name": "#4",
  "description": null,
  "context": "/document/transcript/*",
  "defaultLanguageCode": "en",
  "modelVersion": null,
  "includeOpinionMining": false,
  "inputs": [
    {
      "name": "text",
      "source": "/document/transcript/*"
    }
  ],
  "outputs": [
    {
      "name": "sentiment",
      "targetName": "sentiment"
    }
  ]
}
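For reference, a minimal sketch of how the extraction prompt above could be sent with these settings, using the openai Python package; the transcript string and the exact list of fields to extract are placeholders:

import openai

transcript = "..."  # placeholder for the phone conversation text

prompt = (
    "You must extract the following information from the phone conversation below:\n"
    "...\n\n"  # list the fields you want returned as JSON
    f"Conversation:\n{transcript}"
)

response = openai.Completion.create(
    engine="text-davinci-003",
    prompt=prompt,
    temperature=0.7,
    max_tokens=500,
)
print(response["choices"][0]["text"])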
<?xml version="1.0" encoding="utf-8"?>
<lexicon xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon http://www.w3.org/TR/2007/CR-pronunciation-lexicon-20071212/pls.xsd" version="1.0" alphabet="ipa" xml:lang="de-DE" xmlns="http://www.w3.org/2005/01/pronunciation-lexicon">
  <lexeme>
    <grapheme>passieren</grapheme>
    <phoneme>paˈsiːʁən</phoneme>
  </lexeme>
  <lexeme>
    <grapheme>Reserve</grapheme>
    <phoneme>ʁeˈzɛʁvə</phoneme>
  </lexeme>
</lexicon>
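To take effect, the lexicon file has to be hosted at a publicly reachable URL and referenced from SSML. A minimal sketch with the Speech SDK; the lexicon URL and the sample sentence are placeholders:

import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(subscription="xxxxxxxxxxxx", region="westeurope")
synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)

# The lexicon element points at the hosted PLS file; the URL is a placeholder
ssml = (
    '<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="de-DE">'
    '<voice name="de-DE-KatjaNeural">'
    '<lexicon uri="https://example.com/lexicon.xml"/>'
    'Das sollte nicht passieren.'
    '</voice></speak>'
)
result = synthesizer.speak_ssml_async(ssml).get()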
import azure.cognitiveservices.speech as speechsdk
import requests

# Access key for the Azure Speech API (this must be stored in the backend and must not be exposed to the client)
access_key = 'xxxxxxxxxxxx'
region = "westeurope"

# This method should run in the backend, so that access_key is not needed in the client
def get_auth_token(access_key):
    fetch_token_url = f"https://{region}.api.cognitive.microsoft.com/sts/v1.0/issueToken"
    headers = {'Ocp-Apim-Subscription-Key': access_key}
    response = requests.post(fetch_token_url, headers=headers)
    return response.text  # short-lived bearer token
# Utility imports: HTTP requests, concurrency and timing helpers, and
# Levenshtein (edit distance) for comparing strings such as transcripts
import requests
import io
import logging
import threading
import time
import concurrent.futures
import Levenshtein as lev
from datetime import datetime
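As an illustration of the Levenshtein import, a sketch of comparing a reference transcript against a recognition result; both strings are hypothetical:

reference = "wie kann ich ihnen helfen"   # hypothetical reference transcript
hypothesis = "wie kann ich ihn helfen"    # hypothetical recognition result

distance = lev.distance(reference, hypothesis)
similarity = lev.ratio(reference, hypothesis)
print(f"edit distance: {distance}, similarity: {similarity:.2f}")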
import requests
import azure.cognitiveservices.speech as speechsdk

# This code should run in the backend of the mobile application
headers = {
    'Ocp-Apim-Subscription-Key': '<paste your key here>'
}
token_url = 'https://speechapicstest.cognitiveservices.azure.com/sts/v1.0/issuetoken'
response = requests.post(token_url, headers=headers)
auth_token = response.text  # short-lived token to hand to the client
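Continuing the snippet above, a sketch of exposing this as a backend endpoint the mobile app can call; Flask is an assumption here, any web framework works:

from flask import Flask

app = Flask(__name__)

@app.route("/token", methods=["POST"])
def issue_token():
    # Exchange the subscription key (kept server-side) for a short-lived token
    response = requests.post(token_url, headers=headers)
    return response.text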
# This example shows how the real-time Speech API can be used to synthesize audio files that are longer than 10 minutes
import azure.cognitiveservices.speech as speechsdk

speech_key, service_region = "xxxxxxxxxxx", "westeurope"
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
speech_config.speech_synthesis_voice_name = "de-DE-KatjaNeural"
file_name = "outputaudio.wav"
You must extract the following information from the phone conversation below:
import os
import openai
from dotenv import load_dotenv
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper
from langchain.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LangchainEmbedding

# Load env variables (create .env with OPENAI_API_KEY and OPENAI_API_BASE)
load_dotenv()
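A sketch of wiring these imports together into a queryable index; the "data" directory, the deployment name, and the API version are assumptions:

openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_key = os.getenv("OPENAI_API_KEY")
openai.api_version = "2022-12-01"  # adjust to the API version of your Azure OpenAI resource

# Wrap the Azure OpenAI deployment and embeddings for llama_index
llm_predictor = LLMPredictor(llm=AzureOpenAI(deployment_name="text-davinci-003"))
embedding_llm = LangchainEmbedding(OpenAIEmbeddings())

documents = SimpleDirectoryReader("data").load_data()
index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor, embed_model=embedding_llm)
print(index.query("What was the phone conversation about?"))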
import os
import openai
from dotenv import load_dotenv
from langchain.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings

# Load environment variables (set OPENAI_API_KEY, OPENAI_API_BASE, and OPENAI_API_VERSION in .env)
load_dotenv()

# Configure OpenAI API
openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_version = os.getenv("OPENAI_API_VERSION")
openai.api_key = os.getenv("OPENAI_API_KEY")
import os
import openai
from dotenv import load_dotenv
from langchain.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings

# Load environment variables (set OPENAI_API_KEY and OPENAI_API_BASE in .env)
load_dotenv()

# Configure OpenAI API
openai.api_type = "azure"
openai.api_base = os.getenv("OPENAI_API_BASE")
openai.api_key = os.getenv("OPENAI_API_KEY")
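Hypothetical usage once configured; the deployment name is an assumption, use the name of your own Azure OpenAI deployment:

llm = AzureOpenAI(deployment_name="text-davinci-003", temperature=0.7)
print(llm("You must extract the following information from the phone conversation below: ..."))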