John Adeojo (john-adeojo)
query = "What's the best day to visit the Eiffel Tower for the best view of the city?"
query_1 = f"{query} Return an explanation alongside your response, and a description of the relevant weather conditions"
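A minimal sketch of how this augmented query might be sent to the model, assuming the pre-1.0 openai Python client and the gpt-4-0613 model that appears in the response object further down; the message structure is illustrative:

import openai

openai.api_key = "YOUR_API_KEY"  # assumption: key set directly for this sketch

response = openai.ChatCompletion.create(
    model="gpt-4-0613",
    messages=[{"role": "user", "content": query_1}],
)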
from sqlalchemy import create_engine
import openai
import pandas as pd
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
# Load the data into an in-memory SQLite database
def load_data(df):
    engine = create_engine('sqlite:///:memory:')
    # Write the data to the SQLite database
    df.to_sql('data', engine, index=False)  # table name 'data' is illustrative
    return engine
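A quick usage sketch for load_data; the DataFrame below is made up purely for illustration:

weather_df = pd.DataFrame({"day": ["Monday", "Tuesday"], "temperature_c": [21.5, 24.0]})
engine = load_data(weather_df)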
<OpenAIObject chat.completion id=chatcmpl-7byJmyUNMNeYIIQhrBQ295u4JqM4C at 0x1f654544680> JSON: {
  "id": "chatcmpl-7byJmyUNMNeYIIQhrBQ295u4JqM4C",
  "object": "chat.completion",
  "created": 1689284354,
  "model": "gpt-4-0613",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
import openai
import pandas as pd
import json
def parse_location(query):
    # OpenAI function calling
    function_call = [
        {
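            # Continuation sketch: the preview cuts off at the opening brace above.
            # The schema name, fields, and descriptions below are assumptions, not
            # the author's actual definitions.
            "name": "get_location",
            "description": "Extract the location mentioned in the user's query",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "Place name, e.g. 'Eiffel Tower, Paris'",
                    }
                },
                "required": ["location"],
            },
        }
    ]
    # Ask the model to call the schema above and return structured arguments
    response = openai.ChatCompletion.create(
        model="gpt-4-0613",
        messages=[{"role": "user", "content": query}],
        functions=function_call,
        function_call={"name": "get_location"},
    )
    arguments = json.loads(response["choices"][0]["message"]["function_call"]["arguments"])
    return arguments["location"]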
from geopy.geocoders import Nominatim
import pandas as pd
import requests
import uuid
# gets latitude and longitude
def get_lat_long(location):
    geolocator = Nominatim(user_agent="my_application_" + str(uuid.uuid4()))
    location = geolocator.geocode(location)
    return (location.latitude, location.longitude)
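A usage sketch: resolve a place name and pull hourly temperatures from a public weather API. Open-Meteo is assumed here purely for illustration, since the preview does not show which weather source the gist actually uses:

lat, lon = get_lat_long("Eiffel Tower, Paris")
weather = requests.get(
    "https://api.open-meteo.com/v1/forecast",
    params={"latitude": lat, "longitude": lon, "hourly": "temperature_2m"},
).json()
weather_df = pd.DataFrame(weather["hourly"])  # columns: time, temperature_2m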
import pandas as pd
import openai
from langchain import OpenAI, SQLDatabase, SQLDatabaseChain
# Please replace "C:/path/to/my_database.db" with the absolute path to your database
db = SQLDatabase.from_uri("sqlite:///C:/path/to/my_database.db")
openai_token = "YOUR_API_KEY"  # Replace with your API secret key from OpenAI
llm = OpenAI(temperature=0, verbose=True, openai_api_key=openai_token)
db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True, use_query_checker=True)
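With the chain wired up, a natural-language question can be run directly against the database; the question below is illustrative:

result = db_chain.run("Which day has the clearest weather at the Eiffel Tower?")
print(result)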
conda env create -f environment.yml
data_location: YOUR_DATA_STORE
model_yaml: "https://raw.githubusercontent.com/john-adeojo/Credit-Card-Fraud-Model-Registry/main/model%20yaml%20files/model_1a.yaml"
output_dir: "../data/06_models"
model_options:
  seed: 42
  test_size: 0.2
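These model_options presumably feed the train/test split later in the pipeline; a sketch of how a split node might consume them, assuming the parameters arrive as a plain dict and the split is row-wise:

import pandas as pd
from sklearn.model_selection import train_test_split

def split_data(df: pd.DataFrame, model_options: dict):
    # Use the seed and test_size declared in the parameters file above
    train, test = train_test_split(
        df,
        test_size=model_options["test_size"],
        random_state=model_options["seed"],
    )
    return train, test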
"""
This is a boilerplate pipeline 'train_model'
generated using Kedro 0.18.10
"""
from kedro.pipeline import Pipeline, node, pipeline
from .nodes import read_data, split_data, run_experiment, run_predictions, model_training_diagnostics
def create_pipeline(**kwargs) -> Pipeline:
    return pipeline(
        [
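            # Continuation sketch: the preview stops at the opening bracket above.
            # The input/output dataset names below are assumptions, not the
            # project's actual catalog entries.
            node(func=read_data, inputs="params:data_location", outputs="raw_data", name="read_data"),
            node(func=split_data, inputs=["raw_data", "params:model_options"], outputs=["train_data", "test_data"], name="split_data"),
            node(func=run_experiment, inputs=["train_data", "params:model_options"], outputs="trained_model", name="run_experiment"),
            node(func=run_predictions, inputs=["trained_model", "test_data"], outputs="predictions", name="run_predictions"),
            node(func=model_training_diagnostics, inputs=["trained_model", "test_data"], outputs="diagnostics", name="model_training_diagnostics"),
        ]
    )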
import pandas as pd
from sklearn.model_selection import train_test_split
# declare the other imports
# Helper functions
def get_latest_experiment_dir(base_dir):
    # Logic to fetch latest directory
    ...

def delete_file():
    # Logic to delete files
    ...
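The stubs above are left empty in the preview; one possible implementation of get_latest_experiment_dir, assuming experiment runs are written as subdirectories of base_dir and "latest" means most recently modified:

import os

def get_latest_experiment_dir(base_dir):
    # Return the most recently modified subdirectory of base_dir
    subdirs = [
        os.path.join(base_dir, d)
        for d in os.listdir(base_dir)
        if os.path.isdir(os.path.join(base_dir, d))
    ]
    return max(subdirs, key=os.path.getmtime)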