Morgan McGuire (morganmcg1)
import base64
from openai import OpenAI
import weave
import wave
client = OpenAI()
weave.init("audio-in-weave")
@weave.op()
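The preview cuts off at this decorator. Below is a minimal sketch of what the decorated op might look like, assuming the goal is to send a local WAV file to an audio-capable chat model; the function name, model name, and message format are assumptions, not taken from the gist.

```
@weave.op()
def ask_about_audio(audio_path: str, question: str) -> str:
    # Read the WAV file and base64-encode it for the API (assumed flow).
    with open(audio_path, "rb") as f:
        audio_b64 = base64.b64encode(f.read()).decode("utf-8")

    # Model name and message shape are assumptions based on OpenAI's
    # audio-input chat completions interface, not the original gist.
    response = client.chat.completions.create(
        model="gpt-4o-audio-preview",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": question},
                    {"type": "input_audio", "input_audio": {"data": audio_b64, "format": "wav"}},
                ],
            }
        ],
    )
    return response.choices[0].message.content
```

Because the function is wrapped in @weave.op(), its inputs and outputs are traced in the "audio-in-weave" project initialised above.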
@morganmcg1
morganmcg1 / gist:58e55e9f0abeb3f046ac851727a632ac
Created September 20, 2024 18:38
turn off wandb metadata logging
import wandb

# Disable automatic system metrics, metadata and console capture for this run
run = wandb.init(
    settings={
        "_disable_stats": True,  # disable collecting system metrics
        "_disable_meta": True,   # disable collecting system metadata (including hardware info)
        "console": "off",        # disable capturing stdout/stderr
    }
)
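These settings only turn off the automatic collection; anything logged explicitly still reaches the run. A minimal usage sketch (the metric names are illustrative):

```
run.log({"loss": 0.123, "epoch": 1})  # explicit logging still works
run.finish()
```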
# From Unsloth Llama 3.1 fine-tuning notebook
from unsloth import FastLanguageModel
import torch

def print_gpu_stats():
    gpu_stats = torch.cuda.get_device_properties(0)
    start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
    max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
    print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
    print(f"{start_gpu_memory} GB of memory reserved.")
celebrities = [
    "Tom Cruise",
    "Leonardo DiCaprio",
    "Brad Pitt",
    "Jennifer Lawrence",
    "Scarlett Johansson",
    "Johnny Depp",
    "Meryl Streep",
    "Robert De Niro",
    "Tom Hanks",
]
#!/bin/bash
# Download the Miniforge installer
wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh
# Run the Miniforge installer
bash Miniforge3-Linux-x86_64.sh
# Install mamba in the base environment using conda
conda install mamba -n base -c conda-forge
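Once installed, mamba works as a drop-in replacement for conda when creating and updating environments (for example, mamba create -n my-env python=3.11), with a considerably faster dependency solver.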
@morganmcg1
morganmcg1 / moe_non_determinism.py
Created January 23, 2024 16:50
Non-determinism in GPT-3.5, GPT-4 and Mixtral
import os
import json
import tqdm
import wandb
import openai
from openai import OpenAI
from time import sleep
from pathlib import Path

openai.api_key = "sk-..."  # supply your API key however you choose
# https://platform.openai.com/docs/models

# wandb.Table takes its columns first; the table is logged under a key (e.g. "my_table") later
tbl = wandb.Table(columns=["input", "output", "temperature"])
query = "hello world"
system_prompt = "you are friendly"
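The preview stops before the sampling loop. A minimal sketch of the kind of loop the gist title describes, repeatedly sending the same query at a fixed temperature and logging each completion to the table, is below; the model name, number of trials, and project name are assumptions.

```
client = OpenAI()
run = wandb.init(project="moe-non-determinism")  # project name is an assumption

n_trials = 10        # assumed number of repeats
temperature = 0.0    # even at temperature 0, outputs can differ between calls
for _ in tqdm.tqdm(range(n_trials)):
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed model; the gist also covers GPT-4 and Mixtral
        temperature=temperature,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": query},
        ],
    )
    tbl.add_data(query, response.choices[0].message.content, temperature)
    sleep(1)  # simple rate limiting

run.log({"my_table": tbl})
run.finish()
```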
@morganmcg1
morganmcg1 / expert_prompt
Last active July 11, 2023 19:32
chatgpt expert prompt
"Recently, this problem was solved correctly. Here is the answer which turned out to be perfectly correct.
Note how the answer is documented step-by-step in a way that uses complex reasoning.
The person who discovered this solution always showed how they arrived at the decision to
execute the most efficient possible choice about what to do next, and clearly relied on
error-free code, calculators & fact-checked outside data sources to provide perfectly accurate answers at every step."
---------------
Here's a prompt that works really well to get GPT-4 to shorten text; I often use it to make my tweets fit in 280 characters:
#!/usr/bin/env python
# coding: utf-8
# In this notebook we will automatically generate a set of evaluation questions based on wandb docs
import random
import wandb
import re
import openai
import os
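The preview ends at the imports. A rough sketch of the generation loop the opening comment describes, chunking local docs and asking a chat model to write an evaluation question for each chunk, is below; the docs path, chunking scheme, model, and prompt wording are all assumptions, and it uses the current OpenAI client rather than the legacy module-level openai API the snippet imports.

```
from pathlib import Path
from openai import OpenAI

client = OpenAI()

DOCS_DIR = Path("wandb_docs")   # assumed location of the wandb docs markdown files
CHUNK_SIZE = 2000               # assumed chunk size in characters

def chunk_text(text, size=CHUNK_SIZE):
    return [text[i:i + size] for i in range(0, len(text), size)]

eval_questions = []
for doc_path in sorted(DOCS_DIR.glob("**/*.md")):
    for chunk in chunk_text(doc_path.read_text()):
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",  # assumed model
            messages=[
                {"role": "system", "content": "You write evaluation questions for documentation."},
                {"role": "user", "content": f"Write one question a user could answer using this excerpt:\n\n{chunk}"},
            ],
        )
        eval_questions.append(
            {"source": str(doc_path), "question": response.choices[0].message.content}
        )
```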
@morganmcg1
morganmcg1 / wandb_prompts_lanarky_patch.py
Created May 19, 2023 12:15
Monkey patching Lanarky for WandbTracer
import langchain
import wandb
from typing import Any, Awaitable, Callable, Dict, Optional, Union
from fastapi.responses import StreamingResponse as _StreamingResponse
from langchain.chains.base import Chain
from starlette.background import BackgroundTask
from starlette.types import Send
from dotenv import load_dotenv
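The preview stops at the imports, so the actual patch isn't visible. The general monkey-patching pattern the gist title describes, wrapping an existing method so that every call also receives a tracing callback before rebinding it on the class, looks roughly like the sketch below; the class and callback are stand-ins defined here, not Lanarky's or wandb's real API.

```
# Generic illustration of the monkey-patching pattern (toy class and callback
# defined here; Lanarky's real classes are not shown in this preview).

class ChainRunner:
    """Stand-in for the class being patched."""
    def run(self, inputs, callbacks=None):
        print(f"running with callbacks={callbacks}")
        return inputs

class TracingCallback:
    """Stand-in for a WandbTracer-style callback."""

_original_run = ChainRunner.run

def _patched_run(self, inputs, callbacks=None):
    # Inject the tracer into every call, then delegate to the original method.
    callbacks = list(callbacks or []) + [TracingCallback()]
    return _original_run(self, inputs, callbacks=callbacks)

ChainRunner.run = _patched_run  # the monkey patch: rebind the method on the class

ChainRunner().run({"question": "hi"})  # callbacks now always include the tracer
```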