# move to home directory
cd ~
# move the .zsh_history file into another .zsh_history_bad file
mv .zsh_history .zsh_history_bad
# write all printable strings into a new .zsh_history file
strings .zsh_history_bad > .zsh_history
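# Optionally, reload the repaired history into the current shell to confirm
# the fix took (a quick sanity check, assuming zsh's default HISTFILE of
# ~/.zsh_history):
fc -R ~/.zsh_history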
llmc() {
  local system_prompt='Output a command that I can run in a ZSH terminal on macOS to accomplish the following task. Try to make the command self-documenting, using the long version of flags where possible. Output the command first enclosed in a "```zsh" codeblock followed by a concise explanation of how it accomplishes it.'
  local temp_file=$(mktemp)
  local capturing=true
  local command_buffer=""
  local first_line=true
  local cleaned_up=false # Flag to indicate whether cleanup has been run
  cleanup() {
    # Only run cleanup if it hasn't been done yet
# [2023-05-25] Added Copilot Chat, and VSCode Insiders
# [2023-05-20] Add VSCode Insiders variant
# [2023-02-22] Added Copilot Labs
# Fix GitHub Copilot self-signed cert problem
# See: https://github.com/orgs/community/discussions/8866#discussioncomment-3517831
# Note
#
# To make GitHub Copilot/Nightly/Labs/Chat work, you might need additional
% df -h
Filesystem                  Size  Used Avail Use% Mounted on
/dev/dm-1                    46G   14G   31G  31% /
udev                         10M     0   10M   0% /dev
tmpfs                       2.3G  9.2M  2.3G   1% /run
tmpfs                       5.8G  232K  5.8G   1% /dev/shm
tmpfs                       5.0M  4.0K  5.0M   1% /run/lock
tmpfs                       5.8G     0  5.8G   0% /sys/fs/cgroup
/dev/mapper/thinkbook-home   92G   55G   33G  63% /home
/dev/sda1                   232M   35M  181M  16% /boot
# logfilename                                           [owner:group]        mode count size when flags [/pid_file] [sig_num]
/Users/your-username/path-your-rails-project/log/*.log  your-username:staff  644  4     *    $D0  GJ
# NOTES
#
# Place file in /etc/newsyslog.d
# '$D0' under 'when' tells newsyslog to rotate logs daily at midnight.
# Alternatively you could use '24' for 'when', which would specify "every 24 hours"
# '*' under 'size' specifies that logs should be rotated regardless of their size.
# 'G' under 'flags' tells newsyslog that the 'logfilename' is a pattern and it should rotate all log files matching the pattern.
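# 'J' under 'flags' tells newsyslog to compress rotated logs with bzip2.
# To sanity-check the entry without actually rotating anything, a verbose dry
# run can help (the config filename below is an assumption; use whatever name
# you gave the file in /etc/newsyslog.d):
sudo newsyslog -n -v -f /etc/newsyslog.d/rails-logs.conf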
#!/usr/bin/env bash
size=1024 # MB
mount_point=$HOME/tmp
name=$(basename "$mount_point")
usage() {
  echo "usage: $(basename "$0") [mount | umount | remount | check | orphan]" \
    "(default: mount)" >&2
}
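# Example invocations (the script filename "ramdisk.sh" is hypothetical; use
# whatever name you saved this file under -- the default action is "mount"):
#   ./ramdisk.sh
#   ./ramdisk.sh umount
#   ./ramdisk.sh remount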
#!/bin/sh
# This program has two features.
#
# 1. Create a disk image on RAM.
# 2. Mount that disk image.
#
# Usage:
#   $0 <dir> <size>
#
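# Example (both the script name "mkramdisk.sh" and the assumption that <size>
# is given in megabytes are illustrative -- the header above does not pin
# down either):
#   ./mkramdisk.sh ~/tmp 1024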
// #popclip extension for ChatGPT
// name: ChatGPT Quick Actions
// icon: iconify:logos:openai-icon
// language: javascript
// module: true
// entitlements: [network]
// options: [{
//   identifier: apikey, label: API Key, type: string,
//   description: 'Obtain API key from https://platform.openai.com/account/api-keys'
// }]
Our group recently acquired a new server to do some deep learning: a SuperMicro 4029GP-TRT2, stuffed with 8x NVidia RTX 2080 Ti. Though maybe a bit overpowered, with upcoming networks like BigGAN and fully 3D networks, as well as students joining our group, this machine will be used quite a lot in the future.
One challenge is how to manage these GPUs. There are many approaches, but given that most PhD candidates aren't sysadmins, these range from a 'free-for-all', which leads to one person hogging all GPUs for weeks due to a bug in their code, to Excel sheets that no one understands and no one adheres to because changing GPU ids in code is hard. This leads to a lot of frustration, low productivity, and under-utilisation of these expensive servers. Another issue is conflicting software versions: TensorFlow and Keras, for example, tend to make breaking API changes every now and then. As t
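For context, the usual ad-hoc answer to "changing GPU ids in code is hard" is to pin a job to specific devices from the shell via the CUDA_VISIBLE_DEVICES environment variable, which is exactly the kind of manual bookkeeping described above (the GPU indices and script name here are only illustrative):

# run a training job on physical GPUs 2 and 3 only; inside the process
# they show up as devices 0 and 1
CUDA_VISIBLE_DEVICES=2,3 python train.py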