I hereby claim:
- I am iamaaditya on github.
- I am iamaaditya (https://keybase.io/iamaaditya) on keybase.
- I have a public key whose fingerprint is 00B9 886D 6185 B18A 290A 02B8 2C21 2B82 7EC6 0287
To claim this, I am signing this object:
import ossaudiodev as oss
from numpy import fft
import math
import pygame
import pygame.surfarray as surfarray

d = oss.open('rw')           # open the default OSS audio device for read/write
d.setfmt(oss.AFMT_U16_LE)    # unsigned 16-bit little-endian samples
d.channels(1)                # mono capture
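The preview cuts off after the device setup. A minimal sketch of the likely next step, turning captured samples into a frequency spectrum with numpy.fft; the sample rate and chunk size below are my assumptions, not values from the gist:

import numpy as np

d.speed(44100)     # sample rate in Hz (assumed)
CHUNK = 1024       # bytes per read (assumed)

raw = d.read(CHUNK)                                      # blocking read of raw PCM bytes
samples = np.frombuffer(raw, dtype='<u2').astype(float)  # AFMT_U16_LE -> unsigned 16-bit LE
spectrum = np.abs(fft.rfft(samples - samples.mean()))    # magnitude spectrum of the chunk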
# from: http://www.4dsolutions.net/cgi-bin/py2html.cgi?script=/ocn/python/primes.py
"""
primes.py -- Oregon Curriculum Network (OCN)

Feb  1, 2001  changed global var primes to _primes, added relative primes test
Dec 17, 2000  appended probable prime generating methods, plus invmod
Dec 16, 2000  revised to use pow(), removed methods not in text, added sieve()
Dec 12, 2000  small improvements to erastosthenes()
Dec 10, 2000  added Euler test
Oct  3, 2000  modified fermat test
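The log above mentions the fermat test and the switch to the three-argument pow(); for reference, a generic probable-prime check in that style (an illustration, not the OCN code):

import random

def is_probable_prime(n, trials=10):
    """Fermat probable-prime test using pow(a, n-1, n)."""
    if n < 4:
        return n in (2, 3)
    for _ in range(trials):
        a = random.randint(2, n - 2)
        if pow(a, n - 1, n) != 1:
            return False   # a is a Fermat witness: n is composite
    return True            # probably prime (Carmichael numbers can fool this test)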
" cVim configuration -- Aaditya Prakash | |
" gist id -- ed0f4da609dde6b71b43 | |
" Settings | |
"set noautofocus | |
set cncpcompletion | |
set smoothscroll | |
set nohud |
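The gist continues past the preview; a couple of further lines in the same cVim syntax, with purely illustrative values of mine rather than lines from the actual config:

" Options (illustrative, not from the gist)
let searchlimit = 30
let barposition = "bottom"
" Mappings (illustrative, not from the gist)
map <C-d> scrollPageDown
map <C-u> scrollPageUp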
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the data; the converter turns the letter label into a number
data = np.loadtxt('letter-recognition.data', dtype='float32', delimiter=',',
                  converters={0: lambda ch: ord(ch) - ord('A')})

# Split the data in two, 10000 rows each for train and test
train, test = np.vsplit(data, 2)
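The preview ends at the train/test split. A sketch of how the two halves might be fed to OpenCV's kNN classifier (OpenCV 3+ cv2.ml API; k=5 is an arbitrary choice of mine):

# First column is the numeric letter label, the remaining 16 columns are features.
responses, train_data = np.hsplit(train, [1])
labels, test_data = np.hsplit(test, [1])

knn = cv2.ml.KNearest_create()
knn.train(train_data, cv2.ml.ROW_SAMPLE, responses)
ret, result, neighbours, dist = knn.findNearest(test_data, k=5)

# Accuracy is the fraction of test rows whose predicted letter matches the true label.
correct = np.count_nonzero(result == labels)
print('accuracy: %.2f%%' % (correct * 100.0 / result.size))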
import tensorflow as tf

"""
Multi-dimensional softmax.
See https://github.com/tensorflow/tensorflow/issues/210: the native softmax only
supports batch_size x dimension tensors, so this computes softmax along an
arbitrary axis of target.
"""
def softmax(target, axis, name=None):
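    # The preview is truncated after the signature; the body below is a sketch of
    # the usual axis-wise softmax (subtract the per-axis max for numerical
    # stability, exponentiate, normalize) and is not necessarily the original
    # gist's code. The name argument is ignored in this sketch.
    max_along_axis = tf.reduce_max(target, axis, keepdims=True)
    shifted_exp = tf.exp(target - max_along_axis)
    normalizer = tf.reduce_sum(shifted_exp, axis, keepdims=True)
    return shifted_exp / normalizer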
""" This program takes a file, and splits it into given percentage by line number, but uses | |
randomization to select the files | |
USAGE: python randomize_split.py <file_name> <split_percentage_for_test_eg_10> | |
@author: aaditya prakash""" | |
from __future__ import division | |
import sys | |
import random |
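The preview stops after the imports. A sketch of the core logic the docstring describes; the output file names (<file>.train and <file>.test) are my assumption, not the gist's:

def split_file(file_name, test_percentage):
    with open(file_name) as f:
        lines = f.readlines()
    test_count = int(len(lines) * test_percentage / 100)
    test_indices = set(random.sample(range(len(lines)), test_count))
    with open(file_name + '.train', 'w') as train_out, \
         open(file_name + '.test', 'w') as test_out:
        for i, line in enumerate(lines):
            (test_out if i in test_indices else train_out).write(line)

if __name__ == '__main__':
    split_file(sys.argv[1], float(sys.argv[2]))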
# author: Aaditya Prakash
# nvidia-smi does not show the full command, when the process was launched, or its RAM usage.
# ps does, but you need the PIDs for that.
# lsof /dev/nvidia* gives the PIDs, but only for the user invoking it.
# usage:
#        python programs_on_gpu.py
# Sample Output
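The comments lay out the approach; a sketch of one way to implement it by joining nvidia-smi's per-process view with ps (the original's exact output format is not shown in the preview):

import subprocess

def programs_on_gpu():
    # PID and GPU memory of every compute process, straight from nvidia-smi.
    smi = subprocess.check_output(
        ['nvidia-smi', '--query-compute-apps=pid,used_memory', '--format=csv,noheader'])
    for line in smi.decode().strip().splitlines():
        pid, used_memory = [field.strip() for field in line.split(',')]
        # ps supplies what nvidia-smi omits: user, start time, RAM, and the full command.
        ps = subprocess.check_output(
            ['ps', '-p', pid, '-o', 'user=,lstart=,rss=,command='])
        print(pid, used_memory, ps.decode().strip())

if __name__ == '__main__':
    programs_on_gpu()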
# Code snippet to print various ML-related metrics given the y_labels and the
# probabilities of each label (output of softmax)
# Aaditya Prakash

from sklearn.metrics import (f1_score, roc_auc_score, precision_score, recall_score,
                             accuracy_score, average_precision_score,
                             precision_recall_curve, hamming_loss)

def print_metrics(y_labels, probs):
    threshold = 0.5
    macro_auc = roc_auc_score(y_labels, probs, average='macro')
    micro_auc = roc_auc_score(y_labels, probs, average='micro')
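    # The preview stops here; a plausible continuation that thresholds the
    # probabilities (assumed to be a NumPy array) and reports the remaining
    # imported metrics. This is not necessarily the original gist's code.
    preds = (probs >= threshold).astype(int)
    print('macro AUC      : %.4f' % macro_auc)
    print('micro AUC      : %.4f' % micro_auc)
    print('macro F1       : %.4f' % f1_score(y_labels, preds, average='macro'))
    print('micro F1       : %.4f' % f1_score(y_labels, preds, average='micro'))
    print('macro precision: %.4f' % precision_score(y_labels, preds, average='macro'))
    print('macro recall   : %.4f' % recall_score(y_labels, preds, average='macro'))
    print('hamming loss   : %.4f' % hamming_loss(y_labels, preds))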
# XLA compilation controlled by the "compile_ops" option
#   compile_ops=False: 4.39 sec
#   compile_ops=True:  0.90 sec

import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''   # hide all GPUs so the benchmark runs on CPU
import tensorflow as tf
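The header comments compare runs with "compile_ops" off and on; a sketch of how such a toggle is typically wired up with the TF 1.x contrib JIT scope (the actual benchmark body is not shown in the preview, so the ops below are placeholders of mine):

from tensorflow.contrib.compiler import jit

compile_ops = True   # flip to False to reproduce the slower, uncompiled timing

with jit.experimental_jit_scope(compile_ops=compile_ops):
    x = tf.random_normal([1000, 1000])
    y = tf.matmul(x, x)   # any graph built inside the scope is eligible for XLA

with tf.Session() as sess:
    sess.run(y)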