I hereby claim:
- I am iamaaditya on github.
- I am iamaaditya (https://keybase.io/iamaaditya) on keybase.
- I have a public key whose fingerprint is 00B9 886D 6185 B18A 290A 02B8 2C21 2B82 7EC6 0287
To claim this, I am signing this object:

# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
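The usual next step in this setting (a minimal sketch, assuming a single-GPU Colab runtime; the GPUtil attribute names are from gputil 1.x) is to print the memory footprint:

gpu = GPUs[0]
process = psutil.Process(os.getpid())
# Host RAM: free system memory and this process's resident size, human-readable.
print('Gen RAM Free: ' + humanize.naturalsize(psutil.virtual_memory().available),
      '| Proc size: ' + humanize.naturalsize(process.memory_info().rss))
# GPU RAM as reported by nvidia-smi via GPUtil.
print('GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB'.format(
    gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil * 100, gpu.memoryTotal))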
FILENAME = paper
PS_FILE = $(FILENAME).ps
PDF_FILE = $(FILENAME).pdf
# PAPERSIZE = a4
PAPERSIZE = letter
LATEX_FILES = *.dvi *.log *.toc *.tof *.aux *.blg *.lof *.lot *.bbl
CLEAN_FILES = $(LATEX_FILES) *.bak core $(PS_FILE) $(PDF_FILE)
COMPRESS_FILES = *.tex *.bib *.sty *.eps *.ps *.fig *.m *.txt *.pgm *.bst *.cls
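Only the variable definitions survive in this preview; a plausible set of rules they feed (a sketch assuming the classic latex / dvips / ps2pdf toolchain, with guessed target names):

all: $(PDF_FILE)

$(PDF_FILE): $(PS_FILE)
	ps2pdf -sPAPERSIZE=$(PAPERSIZE) $(PS_FILE) $(PDF_FILE)

$(PS_FILE): $(FILENAME).dvi
	dvips -t $(PAPERSIZE) -o $(PS_FILE) $(FILENAME).dvi

$(FILENAME).dvi: $(FILENAME).tex
	latex $(FILENAME) && bibtex $(FILENAME) && latex $(FILENAME) && latex $(FILENAME)

clean:
	rm -f $(CLEAN_FILES)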
%matplotlib inline
import matplotlib
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import seaborn as sns; sns.set()
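A minimal example of the kind of plot this setup produces (the data is synthetic, purely for illustration):

# Illustrative only: a small random walk, styled by the ggplot + seaborn setup above.
df = pd.DataFrame({'step': np.arange(100), 'value': np.random.randn(100).cumsum()})
df.plot(x='step', y='value', title='random walk')
plt.show()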
| """ This program takes a file, and splits it into given percentage by line number, but uses | |
| randomization to select the files | |
| USAGE: python randomize_split.py <file_name> <split_percentage> | |
| @author: aaditya prakash""" | |
| from __future__ import division | |
| import sys | |
| import random |
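The preview stops at the imports; one way the described split could be implemented (the output file names and percentage handling are assumptions):

if __name__ == '__main__':
    file_name, pct = sys.argv[1], float(sys.argv[2])
    with open(file_name) as f:
        lines = f.readlines()
    # Shuffle line indices, then send the first <pct>% to one file, the rest to another.
    indices = list(range(len(lines)))
    random.shuffle(indices)
    chosen = set(indices[:int(len(lines) * pct / 100)])
    with open(file_name + '.split1', 'w') as a, open(file_name + '.split2', 'w') as b:
        for i, line in enumerate(lines):
            (a if i in chosen else b).write(line)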
import tensorflow as tf  # TF 1.x API (tf.get_variable, tf.add_to_collection)

# The decay coefficient is named `wd` here because `lambda` is a reserved
# word in Python and cannot be used as a parameter name.
def get_variable_with_decay(name, shape, initializer, wd):
    if initializer is None:
        initializer = tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)
    var = tf.get_variable(name, shape, initializer=initializer, dtype=tf.float32)
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name=name + '_weight_decay')
        tf.add_to_collection('losses', weight_decay)
    return var
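A usage sketch (the variable name, shape, and decay value are illustrative; folding the collected decay terms into the loss with tf.add_n is the common TF 1.x pattern):

weights = get_variable_with_decay('conv1_w', [5, 5, 3, 64], None, wd=0.004)
# later, once the data loss is defined:
# total_loss = tf.add_n(tf.get_collection('losses') + [data_loss])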
import os, sys
import numpy as np
# Set the log level before importing TensorFlow; once the C++ backend has
# loaded, changing this environment variable has no effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from util.data import get_data_NCHW_normalized as get_data
from model import n_way
from random import shuffle
from time import time

def chunker(seq, size):
    # Yield successive size-length slices of seq (the last one may be shorter).
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
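A quick usage check for chunker (the values are illustrative):

for batch in chunker(list(range(10)), 4):
    print(batch)   # [0, 1, 2, 3] then [4, 5, 6, 7] then [8, 9]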
import numpy as np
from keras.layers import Dropout
from keras import applications
from keras.layers import Dense, GlobalAveragePooling2D, merge, Input
from keras.models import Model
max_words = 10000
epochs = 50
batch_size = 32
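These imports and hyperparameters suggest a transfer-learning setup; a minimal sketch of how they typically combine (Keras 2 style, so the legacy merge import goes unused here; the backbone, layer sizes, and class count are assumptions):

# Illustrative fine-tuning head on a pretrained backbone.
base = applications.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
x = GlobalAveragePooling2D()(base.output)
x = Dropout(0.5)(Dense(256, activation='relu')(x))
out = Dense(10, activation='softmax')(x)
model = Model(inputs=base.input, outputs=out)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])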
# XLA compilation controlled by "compile_ops" option
# compile_ops=False: 4.39 sec
# compile_ops=True: 0.90 sec
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import tensorflow as tf
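The benchmark body is truncated; the standard way to flip XLA on and off in TF 1.x sessions, which the compile_ops flag is presumably wired to, looks like:

compile_ops = True  # the flag the timings above refer to

config = tf.ConfigProto()
if compile_ops:
    # Enable XLA JIT compilation for the whole session (TF 1.x API).
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
with tf.Session(config=config) as sess:
    pass  # build and time the graph here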
# Code snippet to print various ML metrics given the y_labels and the
# probabilities of each label (output of softmax)
# Aaditya Prakash
from sklearn.metrics import f1_score, roc_auc_score, precision_score, recall_score, accuracy_score, average_precision_score, precision_recall_curve, hamming_loss

# Renamed from `print` to avoid shadowing the Python builtin.
def print_metrics(y_labels, probs):
    threshold = 0.5
    macro_auc = roc_auc_score(y_labels, probs, average='macro')
    micro_auc = roc_auc_score(y_labels, probs, average='micro')
    preds = (probs >= threshold).astype(int)  # assumes probs is a NumPy array
    print('macro AUC {:.4f} | micro AUC {:.4f}'.format(macro_auc, micro_auc))
    print('F1 (micro) {:.4f} | hamming loss {:.4f}'.format(
        f1_score(y_labels, preds, average='micro'), hamming_loss(y_labels, preds)))
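A quick check on random multi-label data (the shapes and seed are illustrative):

import numpy as np
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=(100, 5))   # 100 samples, 5 binary labels
y_prob = rng.rand(100, 5)                   # per-label probabilities
print_metrics(y_true, y_prob)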