This guide is adapted from this original post by Christopher Charles.
- Clone the MLX Swift Examples GitHub repository:
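  Assuming this refers to Apple's ml-explore repository, the standard clone command would be:

  git clone https://github.com/ml-explore/mlx-swift-examples.git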
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>P5.js Particle Animation</title>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.4.0/p5.min.js"></script>
    <style>
        body {
            margin: 0;
        }
    </style>
</head>
<body>
    <!-- particle sketch script goes here -->
</body>
</html>
#!/usr/bin/env bash

# Default values for percentages
DEFAULT_WIRED_LIMIT_PERCENT=85
DEFAULT_WIRED_LWM_PERCENT=75

# Read input parameters or use default values
WIRED_LIMIT_PERCENT=${1:-$DEFAULT_WIRED_LIMIT_PERCENT}
WIRED_LWM_PERCENT=${2:-$DEFAULT_WIRED_LWM_PERCENT}
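# (Assumed reconstruction of the truncated remainder, not verbatim from the
# source: convert the percentages into MB of total RAM and apply them via
# the iogpu sysctl keys available on Apple Silicon.)
TOTAL_MEM_MB=$(($(sysctl -n hw.memsize) / 1024 / 1024))
WIRED_LIMIT_MB=$((TOTAL_MEM_MB * WIRED_LIMIT_PERCENT / 100))
WIRED_LWM_MB=$((TOTAL_MEM_MB * WIRED_LWM_PERCENT / 100))

echo "Setting wired limit to ${WIRED_LIMIT_MB} MB (low-water mark ${WIRED_LWM_MB} MB)"
sudo sysctl iogpu.wired_limit_mb="$WIRED_LIMIT_MB"
sudo sysctl iogpu.wired_lwm_mb="$WIRED_LWM_MB"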
- Understand the Task: Grasp the main objective, goals, requirements, constraints, and expected output.
- Minimal Changes: If an existing prompt is provided, improve it only if it's simple. For complex prompts, enhance clarity and add missing elements without altering the original structure.
- Reasoning Before Conclusions: Encourage reasoning steps before any conclusions are reached. ATTENTION! If the user provides examples where the reasoning happens afterward, REVERSE the order! NEVER START EXAMPLES WITH CONCLUSIONS!
    - Reasoning Order: Call out reasoning portions of the prompt and conclusion parts (specific fields by name). For each, determine the ORDER in which this is done, and whether it needs to be reversed.
    - Conclusion, classifications, or results should ALWAYS appear last.
- Examples: Include high-quality examples if helpful, using placeholders [in brackets] for complex elements.
    - What kinds of examples may need to be included, how many, and whether they are complex enough to benefit from placeholders.
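As a concrete illustration of the reasoning-first ordering (a made-up classification example, not part of the original guidelines):

Input: "The package arrived crushed and two items were missing."
Reasoning: The message reports damage and an incomplete shipment, so it describes a fulfilment failure rather than a question.
Category: complaint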
# A one liner to leverage the GPU on a mac to transcribe audio files
# Inspired by https://simonwillison.net/2024/Aug/13/mlx-whisper/
llm_transcribe_recording () {
    local file_path="$1"
    python3 -c "
import mlx_whisper
result = mlx_whisper.transcribe('$file_path', path_or_hf_repo='mlx-community/distil-whisper-large-v3')
print(result['text'])
"
}
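Usage is a single call. A sketch, assuming mlx-whisper is installed (pip install mlx-whisper) and using a hypothetical file path:

llm_transcribe_recording ~/Desktop/meeting.m4a > meeting-transcript.txt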
import argparse

import numpy as np
import torch
import torch.nn as nn
import coremltools as ct
from transformers import AutoTokenizer, AutoModelForCausalLM

# When using float16, all predicted logits are 0. To be debugged.
compute_precision = ct.precision.FLOAT32
compute_units = ct.ComputeUnit.CPU_ONLY
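The snippet stops before the conversion itself. A minimal sketch of how these settings typically feed into coremltools; the model name ("gpt2") and sequence length (64) are illustrative assumptions, not values from the original script:

# Illustrative conversion sketch (model and shapes are assumptions).
torch_model = AutoModelForCausalLM.from_pretrained("gpt2", torchscript=True).eval()
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Trace with a fixed-shape example input so Core ML gets a static graph.
example_input = torch.randint(0, tokenizer.vocab_size, (1, 64))
traced = torch.jit.trace(torch_model, example_input)

mlmodel = ct.convert(
    traced,
    convert_to="mlprogram",
    inputs=[ct.TensorType(name="input_ids", shape=(1, 64), dtype=np.int32)],
    compute_precision=compute_precision,  # FLOAT32; see the float16 note above
    compute_units=compute_units,
)
mlmodel.save("model.mlpackage")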
// 3D DOM viewer, copy-paste this into your console to visualise the DOM as a stack of solid blocks.
// You can also minify and save it as a bookmarklet (https://www.freecodecamp.org/news/what-are-bookmarklets/)
(() => {
    const SHOW_SIDES = false;    // color sides of DOM nodes?
    const COLOR_SURFACE = true;  // color tops of DOM nodes?
    const COLOR_RANDOM = false;  // randomise color?
    const COLOR_HUE = 190;       // hue in HSL (https://hslpicker.com)
    const MAX_ROTATION = 180;    // set to 360 to rotate all the way round
    const THICKNESS = 20;        // thickness of layers
    const DISTANCE = 10000;      // ¯\_(ツ)_/¯
llmc() {
    local system_prompt='Output a command that I can run in a ZSH terminal on macOS to accomplish the following task. Try to make the command self-documenting, using the long version of flags where possible. Output the command first enclosed in a "```zsh" codeblock followed by a concise explanation of how it accomplishes it.'
    local temp_file=$(mktemp)
    local capturing=true
    local command_buffer=""
    local first_line=true
    local cleaned_up=false # Flag to indicate whether cleanup has been run

    cleanup() {
        # Only run cleanup if it hasn't been done yet
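        # (Assumed reconstruction of the truncated body: remove the temp
        # file once and flip the flag so a second call is a no-op.)
        if [[ "$cleaned_up" == false ]]; then
            rm -f "$temp_file"
            cleaned_up=true
        fi
    }
    # … (remainder of the function not shown) …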
//
//  VolumePopupView.swift
//
//  Created by Alex Rosenberg on 1/24/24.
//

import SwiftUI
import AVFoundation
import MediaPlayer
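// (Assumed minimal skeleton for the truncated view body; illustrative only,
// not the original implementation.)
struct VolumePopupView: View {
    @State private var volume: Double = 0.5

    var body: some View {
        VStack(spacing: 8) {
            Image(systemName: "speaker.wave.2.fill")
            Slider(value: $volume, in: 0...1)
        }
        .padding()
    }
}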
import json
import os

import ollama


def query_ollama(prompt, model='openhermes:7b-mistral-v2.5-q6_K', context=''):
    response = ollama.generate(
        model=model,
        prompt=context + prompt)
    return response['response'].strip()
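A usage sketch; assumes a local Ollama server is running and the model has already been pulled (ollama pull openhermes:7b-mistral-v2.5-q6_K):

if __name__ == '__main__':
    print(query_ollama('Why is the sky blue?'))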