$ docker images   # list locally available images
$ docker ps       # list currently running containers
#include "unimap_trans.h" | |
enum macro_id { | |
CPP_POINTER, | |
CPP_COMMENT, | |
}; | |
// L0, for remapped X keycodes | |
// 1st row | |
#define AC_L0_LPRN ACTION_MODS_KEY(MOD_LSFT, KC_9) |
#include "unimap_trans.h" | |
#define AC_FN1 ACTION_LAYER_MOMENTARY(1) | |
#define AC_FN2 ACTION_LAYER_TAP_KEY(2, KC_LCTL) | |
#define AC_ENT2 ACTION_MODS_TAP_KEY(MOD_RCTL, KC_ENT) | |
#define AC_LSOS ACTION_MODS_ONESHOT(MOD_LSFT) | |
#define AC_RSOS ACTION_MODS_ONESHOT(MOD_RSFT) | |
#define AC_L2(KEY) ACTION_MODS_KEY(MOD_LCTL, KC_##KEY) |
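The macro_id values above imply ACTION_MACRO entries handled by TMK's action_get_macro() callback. A minimal sketch of such a handler, assuming the macros are meant to type the C++ "->" and "//" sequences (the key sequences and the AC_CPPP/AC_CPPC alias names are assumptions, not from the source):

#define AC_CPPP ACTION_MACRO(CPP_POINTER)   // hypothetical keymap alias
#define AC_CPPC ACTION_MACRO(CPP_COMMENT)   // hypothetical keymap alias

const macro_t *action_get_macro(keyrecord_t *record, uint8_t id, uint8_t opt)
{
    if (!record->event.pressed) return MACRO_NONE;
    switch (id) {
        case CPP_POINTER:   // type "->"
            return MACRO( T(MINS), D(LSFT), T(DOT), U(LSFT), END );
        case CPP_COMMENT:   // type "//"
            return MACRO( T(SLSH), T(SLSH), END );
    }
    return MACRO_NONE;
}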
@echo off
setlocal
:PROMPT
@echo DELETING %1 !!!
SET /P AREYOUSURE=Are you sure (Y/[N])?
IF /I "%AREYOUSURE%" NEQ "Y" GOTO END
REM Take ownership of the target recursively, then grant full control
takeown /f %1 /r /d y
icacls %1 /grant Everyone:(OI)(CI)F /T
icacls %1 /grant %username%:F /T
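Saved as, say, force_delete.bat (the file name is not given in the source), the script is typically run from an elevated prompt with the locked folder as its argument:

C:\> force_delete.bat "C:\Users\old-user\Locked Folder"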
{
  "title": "HHKB for Human Being",
  "rules": [
    {
      "description": "Change left_control+ijkl to arrow keys",
      "manipulators": [
        {
          "from": {
            "key_code": "j",
            "modifiers": {
#!/bin/bash
# Track every remote branch of the repo given as $1, then fetch and pull them all.
pushd "$1" || exit 1
for branch in $(git branch -a | grep remotes | grep -v HEAD | grep -v master); do
    git branch --track "${branch#remotes/origin/}" "$branch"
done
git fetch --all
git pull --all
popd
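Saved as, for example, track_all_branches.sh (the file name is an assumption), it is invoked with the path of a local clone:

$ ./track_all_branches.sh ~/src/some-repo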
{
  "description": "Double click ESC to toggle cursor mode.",
  "manipulators": [
    {
      "conditions": [
        {
          "name": "cursor_mode_trigger_key_pressed",
          "type": "variable_if",
          "value": 1
        },
{
  "build_command": "$sourcepath $classpath $d \"$file\"",
  "java_executables":
  {
    "build": "nxjc",
    "run": "nxjlink",
    "version": "nxjc"
  },
  "jdk_version":
  {
Based on the instructions at https://github.com/gw0/docker-keras, with a few modifications on how to run on GPU.
The original snippet:
$ docker run -it --rm $(ls /dev/nvidia* | xargs -I{} echo '--device={}') $(ls /usr/lib/*-linux-gnu/{libcuda,libnvidia}* | xargs -I{} echo '-v {}:{}:ro') -v $(pwd):/srv gw000/keras:2.1.4-py2-tf-gpu /srv/run.py
The idea is to map the local NVIDIA devices (/dev/nvidia*) into the container, and to mount the local NVIDIA driver libraries into the appropriate /usr/lib/ directories inside the container. With NVIDIA driver 387.34 (the latest at the time of writing), this snippet is no longer sufficient.
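For reference, the two command substitutions in the original snippet just expand into repeated --device and -v flags; on a typical machine the expanded command looks roughly like the following (the device nodes and library paths are illustrative assumptions, not taken from the source):

$ docker run -it --rm \
    --device=/dev/nvidiactl --device=/dev/nvidia-uvm --device=/dev/nvidia0 \
    -v /usr/lib/x86_64-linux-gnu/libcuda.so.387.34:/usr/lib/x86_64-linux-gnu/libcuda.so.387.34:ro \
    -v /usr/lib/x86_64-linux-gnu/libnvidia-fatbinaryloader.so.387.34:/usr/lib/x86_64-linux-gnu/libnvidia-fatbinaryloader.so.387.34:ro \
    -v $(pwd):/srv gw000/keras:2.1.4-py2-tf-gpu /srv/run.py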
Modified snippet:
# kaggle/python docker .bashrc for linux
kpython() {
    docker run -v "$PWD":/tmp/working -w=/tmp/working --rm -it kaggle/python python "$@"
}
ikpython() {
    docker run -v "$PWD":/tmp/working -w=/tmp/working --rm -it kaggle/python ipython
}
kjupyter() {