
ExisteUnDelta existeundelta

  • Barcelona
@existeundelta
existeundelta / agent.py
Created February 23, 2023 06:25 — forked from wiseman/agent.py
Langchain example: self-debugging
from io import StringIO
import sys
from typing import Dict, Optional
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI
@existeundelta
existeundelta / finetune_llama_v2.py
Created July 19, 2023 17:12 — forked from younesbelkada/finetune_llama_v2.py
Fine tune Llama v2 models on Guanaco Dataset
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
@existeundelta
existeundelta / import.py
Created September 18, 2023 08:55 — forked from ajosh0504/import.py
Code snippets associated with the <BLOG_NAME> blog.
from elasticsearch import Elasticsearch
from pathlib import Path
from eland.ml.pytorch import PyTorchModel
from eland.ml.pytorch.transformers import TransformerModel
# Load the custom model
tm = TransformerModel("model", "text_classification")
# Export the model to a TorchScript representation which Elasticsearch uses
tmp_path = "models"
@existeundelta
existeundelta / sft_trainer.py
Created October 5, 2023 19:55 — forked from lewtun/sft_trainer.py
Fine-tuning Mistral 7B with TRL & DeepSpeed ZeRO-3
# This is a modified version of TRL's `SFTTrainer` example (https://github.com/huggingface/trl/blob/main/examples/scripts/sft_trainer.py),
# adapted to run with DeepSpeed ZeRO-3 and Mistral-7B-V1.0. The settings below were run on 1 node of 8 x A100 (80GB) GPUs.
#
# Usage:
# - Install the latest transformers & accelerate versions: `pip install -U transformers accelerate`
# - Install deepspeed: `pip install deepspeed==0.9.5`
# - Install TRL from main: `pip install git+https://github.com/huggingface/trl.git`
# - Clone the repo: `git clone https://github.com/huggingface/trl.git`
# - Copy this Gist into trl/examples/scripts
# - Run from root of trl repo with: accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero3.yaml --gradient_accumulation_steps 8 examples/scripts/sft_trainer.py
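The comments above give only the launch recipe; the script body they wrap is a standard TRL SFTTrainer run. A compressed, assumed skeleton (dataset and hyperparameters are illustrative; under ZeRO-3 DeepSpeed shards the model across the GPUs, so no device_map or quantization config is passed):

# Assumed skeleton of the script the usage notes describe, not the gist verbatim.
from datasets import load_dataset
from transformers import TrainingArguments
from trl import SFTTrainer

dataset = load_dataset("timdettmers/openassistant-guanaco", split="train")  # illustrative dataset

training_args = TrainingArguments(
    output_dir="./sft-mistral-7b",
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,  # matches the launch command above
    bf16=True,
    num_train_epochs=1,
)

trainer = SFTTrainer(
    model="mistralai/Mistral-7B-v0.1",  # loaded internally via AutoModelForCausalLM
    args=training_args,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=2048,
)
trainer.train()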