GitHub gists by Sirius1389 (idontcalculate)
@bot.command()
async def dogsplain(ctx, *, question: str):
    print(f"User: {ctx.author.name}, Query: {question}")
    try:
        response = chat_bot.query(question)
        await send_response(ctx, response)
    except Exception as e:
        await send_response(ctx, "An error occurred. Please try again!")
        print("Error occurred during 'query' command:", e)
import llama_index
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import (
    SimpleDirectoryReader,
    VectorStoreIndex,
    StorageContext,
    load_index_from_storage,
)
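These imports follow the usual llama_index persist-or-build pattern, and the tesla_engine referenced below has to come from somewhere; a hedged sketch (the data and storage paths, and the top_k value, are assumptions):

# Hedged sketch: reload the index if it was persisted, otherwise build it
# ("./storage/tesla" and "./data/tesla" are assumed paths)
try:
    storage_context = StorageContext.from_defaults(persist_dir="./storage/tesla")
    index = load_index_from_storage(storage_context)
except FileNotFoundError:
    documents = SimpleDirectoryReader("./data/tesla").load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir="./storage/tesla")

tesla_engine = index.as_query_engine(similarity_top_k=3)  # assumed top_k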
try:
    query_engine_tools = [
        QueryEngineTool(
            query_engine=tesla_engine,
            metadata=ToolMetadata(
                name="tesla_tool",
                description=(
                    "Provides information about Tesla's predictions for the future. "
                    "Use a detailed plain text question as input to the tool."
                ),
            ),
        ),
    ]
except NameError as e:
    # Assumed handler; the gist preview ends before the original except clause
    print("Query engine not initialized:", e)
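A tool list like this is normally handed straight to an agent; a minimal usage sketch (the chat message is illustrative):

from llama_index.agent import OpenAIAgent

# Hedged usage sketch for the tool list defined above
agent = OpenAIAgent.from_tools(query_engine_tools, verbose=True)
print(agent.chat("What does Tesla predict about autonomous driving?"))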
import os

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    SummaryIndex,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.agent import OpenAIAgent
from llama_index.node_parser import SentenceSplitter

# Initialize the SentenceSplitter node parser
node_parser = SentenceSplitter()

# Load documents and build indexes, one patent at a time
for idx, patent_title in enumerate(patent_titles):
    file_path = os.path.join(patents_dir, f"{patent_title}.txt")
    documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
    nodes = node_parser.get_nodes_from_documents(documents)

    # Create vector_index instance (service_context is assumed to be
    # defined earlier in the gist)
    vector_index = VectorStoreIndex(nodes, service_context=service_context)
    # Build the summary index
    summary_index = SummaryIndex(nodes, service_context=service_context)

    # Now that vector_index is defined, the query engines can be built
    vector_query_engine = vector_index.as_query_engine()
    summary_query_engine = summary_index.as_query_engine()
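The FnRetrieverOpenAIAgent below retrieves from an obj_index that is never defined in this preview; a hedged sketch of the standard llama_index multi-document pattern that would produce it (tool names and descriptions are illustrative):

from llama_index.objects import ObjectIndex, SimpleToolNodeMapping
from llama_index.tools import QueryEngineTool, ToolMetadata

# Hedged sketch: wrap each engine in a tool; in the loop above, each
# iteration would append its tools to all_tools
all_tools = [
    QueryEngineTool(
        query_engine=vector_query_engine,
        metadata=ToolMetadata(
            name=f"vector_tool_{idx}",
            description=f"Answers specific questions about {patent_title}.",
        ),
    ),
    QueryEngineTool(
        query_engine=summary_query_engine,
        metadata=ToolMetadata(
            name=f"summary_tool_{idx}",
            description=f"Summarizes {patent_title}.",
        ),
    ),
]

# Index the tools themselves so a retriever can pick among them; this is
# what obj_index.as_retriever(...) below assumes
tool_mapping = SimpleToolNodeMapping.from_objects(all_tools)
obj_index = ObjectIndex.from_objects(all_tools, tool_mapping, VectorStoreIndex)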
from llama_index.agent import FnRetrieverOpenAIAgent
from llama_index.llms import OpenAI

# Initialize the LLM
llm = OpenAI(model="gpt-3.5-turbo-0613")

# Initialize the FnRetrieverOpenAIAgent
top_agent = FnRetrieverOpenAIAgent.from_retriever(
    obj_index.as_retriever(similarity_top_k=4),
    system_prompt=""" \
idontcalculate / vgg.py
Created May 17, 2024 19:15
Example of transfer learning with a pretrained VGG16 (tf.keras.applications)
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers, models
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.metrics import BinaryAccuracy
# Load the pre-trained VGG16 model without the top layer
pretrained = VGG16(input_shape=(256, 256, 3), include_top=False, weights="imagenet")
pretrained.trainable = False  # freeze the convolutional base so the ImageNet weights stay fixed
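The imports above (layers, models, Adam, BinaryCrossentropy, BinaryAccuracy) are not used yet in this preview; a hedged sketch of the binary-classification head they suggest (layer sizes and learning rate are assumptions):

# Hedged sketch: stack a small binary-classification head on the frozen base
model = models.Sequential([
    pretrained,
    layers.GlobalAveragePooling2D(),
    layers.Dense(256, activation="relu"),
    layers.Dense(1, activation="sigmoid"),
])
model.compile(
    optimizer=Adam(learning_rate=1e-4),  # assumed learning rate
    loss=BinaryCrossentropy(),
    metrics=[BinaryAccuracy()],
)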
import tensorflow as tf
from tensorflow.keras.applications import VGG16
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import EarlyStopping

# Load the pre-trained VGG16 model without the top layer
base_model = VGG16(input_shape=(256, 256, 3), include_top=False, weights='imagenet')
# Freeze the convolutional base
base_model.trainable = False
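ImageDataGenerator and EarlyStopping are imported but unused at this point in the preview; a hedged training sketch (directory paths, batch size, head layers, and epochs are assumptions):

# Hedged sketch: data pipelines and training with early stopping
train_gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
    "data/train",  # assumed path
    target_size=(256, 256),
    batch_size=32,
    class_mode="binary",
)
val_gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
    "data/val",  # assumed path
    target_size=(256, 256),
    batch_size=32,
    class_mode="binary",
)

model = models.Sequential([
    base_model,
    layers.GlobalAveragePooling2D(),
    layers.Dense(1, activation="sigmoid"),
])
model.compile(
    optimizer=optimizers.Adam(learning_rate=1e-4),
    loss="binary_crossentropy",
    metrics=["accuracy"],
)

early_stop = EarlyStopping(monitor="val_loss", patience=3, restore_best_weights=True)
model.fit(train_gen, validation_data=val_gen, epochs=20, callbacks=[early_stop])
model.save("modelVGG16.h5")  # the file the Gradio gist below loads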
idontcalculate / vggtunedgr.py
Created May 17, 2024 20:07
Gradio wrapper around model predictions
import tensorflow as tf
from tensorflow.keras.models import load_model
import gradio as gr
import numpy as np
from PIL import Image
# Load the saved model
model = load_model("modelVGG16.h5")
# Define the prediction function
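# Hedged sketch (not from the original gist): the preprocessing, class
# labels, and Gradio wiring below are assumptions matched to the 256x256
# VGG16 model loaded above
def predict(image: Image.Image):
    img = image.convert("RGB").resize((256, 256))
    arr = np.expand_dims(np.asarray(img, dtype=np.float32) / 255.0, axis=0)
    prob = float(model.predict(arr)[0][0])
    return {"positive": prob, "negative": 1.0 - prob}  # assumed class labels

demo = gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs=gr.Label())
demo.launch()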
from qdrant_client import QdrantClient
from qdrant_client.http.models import Distance, VectorParams
from config import QDRANT_API_KEY, QDRANT_URL

def get_qdrant_client():
    qdrant_client = QdrantClient(
        url=QDRANT_URL,
        api_key=QDRANT_API_KEY,
    )
    # Ensure the collection exists
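    # Hedged completion (not from the original gist): the collection name
    # "documents" and vector size 1536 are assumptions
    existing = {c.name for c in qdrant_client.get_collections().collections}
    if "documents" not in existing:
        qdrant_client.create_collection(
            collection_name="documents",
            vectors_config=VectorParams(size=1536, distance=Distance.COSINE),
        )
    return qdrant_client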