Skip to content

Instantly share code, notes, and snippets.

View hajowieland's full-sized avatar

Hans-Jörg Wieland hajowieland

View GitHub Profile
@hajowieland
hajowieland / macos-live-recognition.sh
Created May 18, 2025 18:33
macos-webcam-live-recognition
#!/usr/bin/env bash
# Live webcam pose recognition on macOS:
#   mediamtx (RTSP server) <- ffmpeg (AVFoundation webcam capture) <- YOLO pose predictor.
# Requires: mediamtx, ffmpeg (with avfoundation support), ultralytics `yolo` CLI.
set -euo pipefail

# Start the RTSP server in the background; remember its PID so we can clean up.
mediamtx &
mediamtx_pid=$!

# Capture webcam device "0" and publish low-latency H.264 over RTSP.
ffmpeg -f avfoundation -use_wallclock_as_timestamps 1 \
  -framerate 30 -video_size 1280x720 -i "0" \
  -vcodec libx264 -preset ultrafast -tune zerolatency -pix_fmt yuv420p \
  -f rtsp -rtsp_transport tcp rtsp://127.0.0.1:8554/mystream &
ffmpeg_pid=$!

# Kill both background processes on any exit path (Ctrl-C included) so they
# are not left orphaned, which the original script did.
cleanup() { kill "$mediamtx_pid" "$ffmpeg_pid" 2>/dev/null || true; }
trap cleanup EXIT

# Give the server and the publisher a moment to come up before the consumer
# connects; otherwise yolo can race the stream and fail to open the RTSP URL.
sleep 2

# Run YOLOv11 pose estimation on the live RTSP stream and display the result.
yolo pose predict model=yolo11n-pose.pt source='rtsp://127.0.0.1:8554/mystream' show
@hajowieland
hajowieland / lama2-70b-chat.yaml
Last active September 8, 2023 23:17
llama2-70b-chat-ggml.yaml
# LocalAI model definition: Llama 2 70B chat (legacy GGML v3, q4_0 quantization).
backend: llama
context_size: 4096   # prompt context window in tokens
f16: true            # load weights in half precision
gpu_layers: 83       # layers offloaded to the GPU
mmap: true           # memory-map the model file instead of reading it fully
name: llama2-70b-chat
parameters:
  # NOTE(review): these keys must be nested under `parameters:` — the
  # indentation was lost in the original paste, which is invalid YAML.
  model: llama-2-70b-chat.ggmlv3.q4_0.bin
  temperature: 0.2
  top_k: 80
@hajowieland
hajowieland / lama2-13b-chat.yaml
Created September 8, 2023 22:42
lama2-13b-chat-ggml.yaml
# LocalAI model definition: Llama 2 13B chat (legacy GGML v3, q4_0 quantization).
backend: llama
context_size: 4096   # prompt context window in tokens
f16: true            # load weights in half precision
gpu_layers: 43       # layers offloaded to the GPU
mmap: true           # memory-map the model file instead of reading it fully
name: llama2-13b-chat
parameters:
  # NOTE(review): these keys must be nested under `parameters:` — the
  # indentation was lost in the original paste, which is invalid YAML.
  model: llama-2-13b-chat.ggmlv3.q4_0.bin
  temperature: 0.2
  top_k: 80
@hajowieland
hajowieland / download_models.sh
Last active February 23, 2024 19:35
download_models.sh
#!/bin/bash
# Interactive helper: lets the user pick LLM model files to download into /models.
cd /models || exit
# Define available models
# Both modern GGUF and legacy GGML v3 quantized files are offered.
declare -a available_models=("codellama-34b-instruct.Q4_0.gguf" "llama-2-13b-chat.ggmlv3.q4_0.bin" "llama-2-13b-chat.Q4_0.gguf" "llama-2-70b-chat.ggmlv3.q4_0.bin" "llama-2-70b-chat.Q4_0.gguf")
# Display available models to user
echo "Available LLM models:"
# NOTE(review): the loop body is truncated in this excerpt — presumably it
# prints a numbered menu entry per model; confirm against the full gist.
for model in "${available_models[@]}"; do
@hajowieland
hajowieland / llama2-chat-message.tmpl
Created September 8, 2023 13:33
llama2-chat-message.tmpl
{{if eq .RoleName "assistant"}}{{.Content}}{{else}}{{/* Llama 2 chat prompt template: assistant turns pass through verbatim; all other turns are wrapped in [INST] tags below. Comment actions emit nothing, so they do not change the rendered prompt. */}}
[INST]
{{if eq .RoleName "system"}}<<SYS>>{{.Content}}<</SYS>>{{else if and (.SystemPrompt) (eq .MessageIndex 0)}}<<SYS>>{{.SystemPrompt}}<</SYS>>{{end}}{{/* a system message — or the configured system prompt on the first message only — goes inside <<SYS>> tags */}}
{{if .Content}}{{.Content}}{{end}}{{/* the message body itself, omitted when empty */}}
[/INST]
{{end}}
@hajowieland
hajowieland / codellama-34b-instruct.yaml
Last active September 8, 2023 22:04
codellama-34b-instruct.yaml
# LocalAI model definition: Code Llama 34B instruct (GGUF, q4_0 quantization).
backend: llama
context_size: 4096   # prompt context window in tokens
f16: true            # load weights in half precision
gpu_layers: 43       # layers offloaded to the GPU
mmap: true           # memory-map the model file instead of reading it fully
name: codellama-34b-instruct
parameters:
  # NOTE(review): these keys must be nested under `parameters:` — the
  # indentation was lost in the original paste, which is invalid YAML.
  model: codellama-34b-instruct.Q4_0.gguf
  temperature: 0.2
  top_k: 80
@hajowieland
hajowieland / llama2-70b-chat.yaml
Last active September 8, 2023 23:14
llama2-70b-chat.yaml
# LocalAI model definition: Llama 2 70B chat (GGUF, q4_0 quantization).
backend: llama
context_size: 4096   # prompt context window in tokens
f16: true            # load weights in half precision
gpu_layers: 43       # layers offloaded to the GPU
ngqa: 8              # grouped-query attention factor required by the 70B model
mmap: true           # memory-map the model file instead of reading it fully
# NOTE(review): original said "llama2-13b-chat" — a copy/paste slip. The
# filename, the model file and ngqa all say 70B, and the 13B name collides
# with the separate 13B config, so the name is corrected to match.
name: llama2-70b-chat
parameters:
  # NOTE(review): must be nested under `parameters:` — indentation was lost
  # in the original paste, which is invalid YAML.
  model: llama-2-70b-chat.Q4_0.gguf
  temperature: 0.2
@hajowieland
hajowieland / llama2-13b-chat.yaml
Last active September 8, 2023 22:01
llama2-13b-chat.yaml
# LocalAI model definition: Llama 2 13B chat.
# NOTE(review): the model file is legacy GGML v3 (.bin) although a GGUF
# variant of the same model exists — confirm which one is intended.
backend: llama
context_size: 4096   # prompt context window in tokens
f16: true            # load weights in half precision
gpu_layers: 43       # layers offloaded to the GPU
mmap: true           # memory-map the model file instead of reading it fully
name: llama2-13b-chat
parameters:
  # NOTE(review): these keys must be nested under `parameters:` — the
  # indentation was lost in the original paste, which is invalid YAML.
  model: llama-2-13b-chat.ggmlv3.q4_0.bin
  temperature: 0.2
  top_k: 80
# ServiceAccount used by the echoserver workload in the "hajo" namespace.
apiVersion: v1
kind: ServiceAccount
metadata:
  # NOTE(review): name/namespace/labels must be nested under `metadata:` —
  # the indentation was lost in the original paste, which is invalid YAML.
  name: echoserver
  namespace: hajo
  labels:
    app: echoserver
    owner: hajo
---
apiVersion: rbac.authorization.k8s.io/v1
@hajowieland
hajowieland / custom-eventing-1.7.3.yaml
Created October 13, 2022 12:45
eventing.yaml 1.7.3 without mt-broker resources
---
# eventing-core.yaml
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0