Enable Interoperability and generate an Access Key and a Secret Key.
pip3 install boto3
# Train fastText skip-gram embeddings on train.txt; output file is timestamped.
# -thread: physical cores per socket, read from the "cpu cores" line of /proc/cpuinfo.
# (Table-extraction "| |" residue removed; it broke the continuation lines.)
fasttext skipgram \
    -thread "$(grep '^cpu\scores' /proc/cpuinfo | uniq | awk '{print $4}')" \
    -minCount 2 \
    -wordNgrams 1 \
    -minn 2 \
    -maxn 7 \
    -epoch 10 \
    -dim 64 \
    -input train.txt \
    -output "$(date +'model-%Y-%m-%dT%H-%M-%S')"
# https://hub.docker.com/r/d34dc3n73r/netdata-glibc/
# Run netdata (glibc build, so NVIDIA monitoring works) with host metrics
# mounted read-only and GPU access enabled.
# (Table-extraction "| |" residue removed; the image name was cut off the end
# of the continuation chain — restored from the repo linked above.)
docker run -d --name=netdata \
    -p 19999:19999 \
    --restart=unless-stopped \
    -v /proc:/host/proc:ro \
    -v /sys:/host/sys:ro \
    -v /var/run/docker.sock:/var/run/docker.sock:ro \
    --gpus all \
    -e NVIDIA_VISIBLE_DEVICES=all \
    --cap-add SYS_PTRACE \
    d34dc3n73r/netdata-glibc
# Reproducibility setup for Keras / TensorFlow 1.x: seed every RNG the stack
# uses, then pin the session to one thread per op pool so scheduling is
# deterministic (Keras FAQ recipe). Table-extraction "| |" residue removed.
import tensorflow as tf
import numpy as np
import random as rn
from tensorflow.keras import backend as K

np.random.seed(0)  # NumPy RNG (weight-init helpers, shuffling)
rn.seed(0)         # Python stdlib RNG
# Multi-threaded op execution is a source of non-determinism; force 1 thread each.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
tf.set_random_seed(0)  # TensorFlow graph-level seed
# Bug fix: the original built session_conf but never used it (and never used K).
# The config only takes effect when the session is created with it and
# registered as the Keras backend session.
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import numpy as np | |
from sklearn.metrics.pairwise import cosine_similarity | |
# NOTE(review): the trailing "| |" tokens are table-extraction residue, the
# original indentation was lost, and the function body is truncated after the
# first loop line — the batching logic that fills `idx`/`val` is missing.
# Left byte-identical; recover the full snippet before use.
def __batch_cosine_similarity_internal__(x_pred, x_true, batch_size=1024): | |
# Row count of x_pred — presumably 2-D, one vector per row. TODO confirm.
x1_len = x_pred.shape[0] | |
# Accumulators — presumably best-match indices and similarity values. TODO confirm.
idx = np.array([]) | |
val = np.array([]) | |
# Walk x_true in chunks of batch_size rows (loop body not visible here).
for i in range(0, len(x_true), batch_size): |
#!/bin/bash
# Transcode $1 to web-friendly H.264 (baseline profile, yuv420p) as "wp-<name>".
# Bug fix: "$1" is now quoted so filenames with spaces survive word splitting.
# (Table-extraction "| |" residue removed.)
ffmpeg -i "$1" -c:v libx264 -profile:v baseline -level 3.0 -pix_fmt yuv420p "wp-$1"
Reference: https://forum.odroid.com/viewtopic.php?t=34769 (Odroid forum).
Download and install the driver:
# Bring the system fully up to date before building an out-of-tree module.
apt update && apt upgrade && apt dist-upgrade
# git to fetch the source; bc and dkms are build prerequisites for the driver.
apt install git bc dkms
# Shallow-clone the Realtek RTL8821CU wifi driver source.
git clone --depth 1 https://github.com/whitebatman2/rtl8821CU.git
cd rtl8821CU/
#!/bin/bash
# find last version in https://cmake.org/download/
# Download the CMake installer, make it executable, and install under /usr/local.
# Bug fix: the original chmod line ended with a bare "\" (no "&&"), so the
# sudo line was joined onto chmod as extra arguments instead of running.
wget https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2-Linux-x86_64.sh && \
chmod +x cmake-3.15.2-Linux-x86_64.sh && \
sudo sh cmake-3.15.2-Linux-x86_64.sh --prefix=/usr/local --exclude-subdir
import xlrd | |
import csv | |
import glob, os | |
# NOTE(review): trailing "| |" tokens are table-extraction residue, the
# original indentation was lost, and the `with` body (the actual CSV writing)
# is truncated. Left byte-identical; restore before running.
# Also: `file` shadows a builtin, and xlrd >= 2.0 no longer reads .xlsx —
# this needs xlrd < 2.0 (or a port to openpyxl). TODO confirm versions.
for file in glob.glob("*.xlsx"): | |
print("{} start".format(file)) | |
wb = xlrd.open_workbook(file) | |
# Assumes every workbook has a sheet named 'Plan1' — verify against the data.
sh = wb.sheet_by_name('Plan1') | |
# Output CSV named after the stem of the xlsx filename.
with open("{}.csv".format(file.split(".")[0]), 'w+') as f: |
import numpy as np | |
from sklearn.metrics import roc_curve | |
# NOTE(review): trailing "| |" tokens are table-extraction residue, the
# original indentation was lost, and the body is truncated right after
# `fpr = dict()` — the per-class roc_curve loop is missing. Left
# byte-identical; recover the rest of the snippet before use.
def calculate_thresholds(n_classes, y_true, y_pred): | |
"""Compute per-class ROC data (body truncated in this copy).

n_classes: number of classes, e.g. 123.
y_true: one-hot encoded labels, e.g. [[1, 0], ...].
y_pred: predicted class probabilities, e.g. np.array([[0.9, 0.3]]).
"""
fpr = dict() | |