Add Dify support to RealChar: a minimal HTTP client for the Dify API (client.py) plus a DifyLLM wrapper (dify_llm.py) that streams chat responses through RealChar's LangChain callback handlers. Configuration is two variables in .env.
# dify config (.env)
# DIFY_BASE_URL may be left empty to use the client default, https://api.dify.ai/v1
DIFY_API_KEY=
DIFY_BASE_URL=
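For reference, a filled-in .env might look like the sketch below. The key value is a made-up placeholder (Dify app API keys are typically prefixed with "app-"), and the base URL shown is simply the client's default from client.py; point it at your own host if you self-host Dify.

# example .env values (placeholders, not real credentials)
DIFY_API_KEY=app-xxxxxxxxxxxxxxxx
DIFY_BASE_URL=https://api.dify.ai/v1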
# dify module: realtime_ai_character/llm/dify/__init__.py (empty; marks the package)

# realtime_ai_character/llm/dify/client.py
import requests


class DifyClient:
    def __init__(self, api_key, base_url="https://api.dify.ai/v1"):
        self.api_key = api_key
        self.base_url = base_url

    def _send_request(self, method, endpoint, json=None, params=None, stream=False):
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        url = f"{self.base_url}{endpoint}"
        response = requests.request(method, url, json=json, params=params, headers=headers, stream=stream)
        return response

    def _send_request_with_files(self, method, endpoint, data, files):
        # No explicit Content-Type here: requests sets multipart/form-data itself.
        headers = {"Authorization": f"Bearer {self.api_key}"}
        url = f"{self.base_url}{endpoint}"
        response = requests.request(method, url, data=data, headers=headers, files=files)
        return response

    def message_feedback(self, message_id, rating, user):
        data = {"rating": rating, "user": user}
        return self._send_request("POST", f"/messages/{message_id}/feedbacks", data)

    def get_application_parameters(self, user):
        params = {"user": user}
        return self._send_request("GET", "/parameters", params=params)

    def file_upload(self, user, files):
        data = {"user": user}
        return self._send_request_with_files("POST", "/files/upload", data=data, files=files)


class CompletionClient(DifyClient):
    def create_completion_message(self, inputs, response_mode, user, files=None):
        data = {
            "inputs": inputs,
            "response_mode": response_mode,
            "user": user,
            "files": files,
        }
        return self._send_request(
            "POST", "/completion-messages", data,
            stream=response_mode == "streaming",
        )


class ChatClient(DifyClient):
    def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None, files=None):
        data = {
            "inputs": inputs,
            "query": query,
            "user": user,
            "response_mode": response_mode,
            "files": files,
        }
        if conversation_id:
            data["conversation_id"] = conversation_id
        return self._send_request(
            "POST", "/chat-messages", data,
            stream=response_mode == "streaming",
        )

    def get_conversation_messages(self, user, conversation_id=None, first_id=None, limit=None):
        params = {"user": user}
        if conversation_id:
            params["conversation_id"] = conversation_id
        if first_id:
            params["first_id"] = first_id
        if limit:
            params["limit"] = limit
        return self._send_request("GET", "/messages", params=params)

    def get_conversations(self, user, last_id=None, limit=None, pinned=None):
        # requests silently drops query params whose value is None
        params = {"user": user, "last_id": last_id, "limit": limit, "pinned": pinned}
        return self._send_request("GET", "/conversations", params=params)

    def rename_conversation(self, conversation_id, name, user):
        data = {"name": name, "user": user}
        return self._send_request("POST", f"/conversations/{conversation_id}/name", data)
# realtime_ai_character/llm/dify_llm.py
import json
import os
from typing import Optional

from dotenv import load_dotenv
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import BaseMessage, Generation, HumanMessage, LLMResult

from realtime_ai_character.llm.base import (
    AsyncCallbackAudioHandler,
    AsyncCallbackTextHandler,
    LLM,
)
from realtime_ai_character.llm.dify.client import ChatClient
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character, timed

logger = get_logger(__name__)

load_dotenv(override=True)


class DifyLLM(LLM):
    def __init__(self):
        self.api_key = os.getenv("DIFY_API_KEY", "")
        # Fall back to the public endpoint when DIFY_BASE_URL is unset or empty;
        # an empty base_url would otherwise produce relative request URLs.
        self.base_url = os.getenv("DIFY_BASE_URL") or "https://api.dify.ai/v1"
        self.chat_client = ChatClient(self.api_key, self.base_url)

    def get_config(self):
        return {"base_url": self.base_url}

    @timed
    async def achat(
        self,
        history: list[BaseMessage],
        user_input: str,
        user_id: str,
        character: Character,
        callback: AsyncCallbackTextHandler,
        audioCallback: Optional[AsyncCallbackAudioHandler] = None,
        metadata: Optional[dict] = None,
        *args,
        **kwargs,
    ) -> str:
        # Generate response. history and character are accepted for interface
        # compatibility but not forwarded: the Dify app defines the prompt and
        # persona server-side.
        callbacks = [callback, StreamingStdOutCallbackHandler()]
        if audioCallback is not None:
            callbacks.append(audioCallback)
        logger.info("call dify...")
        response = self.chat_client.create_chat_message(
            inputs={}, query=user_input, user=user_id, response_mode="streaming"
        )
        response.raise_for_status()

        callback_manager = AsyncCallbackManager.configure(
            callbacks, verbose=True, inheritable_metadata=metadata
        )
        run_managers = await callback_manager.on_chat_model_start(
            serialized={}, messages=[[HumanMessage(content=user_input)]]
        )

        generations = []
        llm_output = {}
        # Dify streams server-sent events; each payload line looks like
        #   data: {"event": "message", "answer": "...", ...}
        for line in response.iter_lines(decode_unicode=True):
            line = line.split("data:", 1)[-1].strip()
            if not line:
                continue
            data = json.loads(line)
            event = data["event"]
            if event == "message":
                answer = data["answer"]
                generations.append(Generation(text=answer))
                await run_managers[0].on_llm_new_token(answer)
            elif event == "message_end":
                result = LLMResult(generations=[generations], llm_output=llm_output)
                await run_managers[0].on_llm_end(result)
        # Generation objects are not strings; join their .text fields.
        return "".join(g.text for g in generations)
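For RealChar to actually pick the new backend up, its LLM factory also needs a branch for it. The exact factory shape depends on the RealChar version you are on, so the following is only a sketch of the idea; the model-name check and the "dify" prefix are illustrative assumptions, not the project's actual code:

# realtime_ai_character/llm/__init__.py (sketch; adapt to the real factory)
from realtime_ai_character.llm.base import LLM


def get_llm(model: str = "gpt-3.5-turbo") -> LLM:
    # hypothetical branch: route any model name starting with "dify" to DifyLLM
    if model.startswith("dify"):
        from realtime_ai_character.llm.dify_llm import DifyLLM

        return DifyLLM()
    # ... existing branches for the other backends ...
    raise ValueError(f"Unknown model: {model}")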