This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| def extract_arguments(json_str): | |
| json_str = json_str.replace("'", '"') | |
| start_index = json_str.find('"arguments":') + len('"arguments":') | |
| start_of_json = json_str.find("{", start_index) | |
| end_of_json = json_str.rfind("}") | |
| if start_of_json != -1 and end_of_json != -1: | |
| extracted = json_str[start_of_json:end_of_json] | |
| if (extracted.startswith("'") and extracted.endswith("'")) or ( |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments,BitsAndBytesConfig | |
| from datasets import load_dataset | |
| model_name ="meta-llama/Meta-Llama-3-8B-Instruct" | |
| tokenizer = AutoTokenizer.from_pretrained(model_name) | |
| dataset = load_dataset("glaiveai/glaive-function-calling-v2",split="train") | |
| def formatting_prompts_func(example): | |
| output_texts = [] |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| ```sh | |
| Initial Setup: | |
| +-------------------+ +---------------+ | |
| | Text Sequence | | Raw Images | | |
| | [T1, <IMG>, T2, | | [Image1, | | |
| | T3, <IMG>, T4] | | Image2] | | |
| +-------------------+ +---------------+ | |
| Step 1: Convert Text and <IMG> Tokens to Embeddings | |
| +---------------------------------------------------------+ |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Quote a string for safe interpolation into a POSIX shell command.
// The value is wrapped in single quotes, and every embedded single
// quote is rewritten as the standard '\'' close-insert-reopen sequence.
function escapeShellArg(str) {
  const quoted = str.replace(/'/g, "'\\''");
  return `'${quoted}'`;
}
| const removeBackticks = (str) => { | |
| // remove leading backticks | |
| str = str.replace(/^(```\n|```)/g, ""); | |
| // remove trailing backticks and everything after | |
| const index = str.lastIndexOf("```"); |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| [ | |
| { | |
| "name": "TheBloke/dolphin-2.1-mistral-7B-GPTQ", | |
| "displayName": "TheBloke/dolphin-2.1-mistral-7B-GPTQ", | |
| "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.", | |
| "websiteUrl": "https://huggingface.co/ehartford/dolphin-2.1-mistral-7b", | |
| "userMessageToken": "<|im_start|>user\n", | |
| "userMessageEndToken": "<|im_end|>\n", | |
| "assistantMessageToken": "<|im_start|>assistant\n", | |
| "assistantMessageEndToken": "<|im_end|>\n", |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/bin/bash | |
| # Define the repository and model details | |
| REPO_URL="git@github.com:ggerganov/llama.cpp.git" | |
| REPO_DIR="llama.cpp" | |
| MODEL_URL="https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q5_K_M.gguf" | |
| MODEL_FILE="mistral-7b-instruct-v0.1.Q5_K_M.gguf" | |
| # Clone the repository if it doesn't already exist | |
| if [ ! -d "$REPO_DIR" ]; then |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Single-quote `str` so it is safe to pass as one argument on a POSIX
// shell command line; each embedded single quote becomes the '\''
// escape (end quote, literal quote, reopen quote).
function escapeShellArg(str) {
  return ["'", str.replace(/'/g, "'\\''"), "'"].join("");
}
| const removeBackticks = (str) => { | |
| // remove leading backticks | |
| str = str.replace(/^(```\n|```)/g, ''); | |
| // remove trailing backticks and everything after | |
| const index = str.lastIndexOf('```'); |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| from transformers import AutoTokenizer, TextGenerationPipeline | |
| from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig | |
| import numpy as np | |
| import torch | |
| import torch.nn as nn | |
| pretrained_model_dir = './merged_models' | |
| quantized_model_dir = './models/CodeLlama-34b-guanaco-gptq' |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| from transformers import AutoModelForCausalLM, AutoTokenizer | |
| from peft import PeftModel | |
| import torch | |
| import os | |
| import argparse | |
| def get_args(): | |
| parser = argparse.ArgumentParser() | |
| parser.add_argument("--base_model_name_or_path", type=str) |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| import os | |
| import torch | |
| from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments,BitsAndBytesConfig | |
| from datasets import load_dataset | |
| from trl import SFTTrainer | |
| from peft import AutoPeftModelForCausalLM, LoraConfig, get_peft_model, prepare_model_for_kbit_training | |
| from utils import find_all_linear_names, print_trainable_parameters | |
| output_dir="./results" |