Skip to content

Instantly share code, notes, and snippets.

View emuccino's full-sized avatar

Eric Muccino emuccino

  • Mindboard
View GitHub Profile
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input,Dense,BatchNormalization,Concatenate,GaussianNoise
from tensorflow.keras.optimizers import Nadam
#define latent dimension size
# Latent size scales with the natural log of (rows in train_df * number of feature columns).
# NOTE(review): assumes `train_df` (training DataFrame) and `data` (list of feature
# column names) are defined earlier in the original gist — confirm upstream.
latent_dim = int(np.ceil(np.log(len(train_df)*len(data))))
#function for building generator network
def compile_generator():
@emuccino
emuccino / loan_data_preprocess.py
Last active April 13, 2020 09:35
Preprocess Loan data for training
from sklearn.preprocessing import MinMaxScaler
# name of the column we are trying to predict
target = 'loan_condition'
# every remaining column is treated as an input feature
data = [column for column in df.columns if column != target]
# feature names get sorted into these two buckets during preprocessing
numeric_data, string_data = [], []
# per-column tokenizers, filled in as string features are processed
tokenizers = {}
@emuccino
emuccino / loan_data_clean.py
Last active April 13, 2020 08:01
Load and clean Loan data
import numpy as np
import pandas as pd
# load the loan dataset, discarding any column that contains missing values
df = pd.read_csv('loan.csv').dropna(axis=1,how='any')
# strip the letter prefix from sub-grades, keeping only the numeric part
df['sub_grade'] = df['sub_grade'].str.slice(start=1).astype(int)
# map letter grades and loan terms to ordinal integer codes
grade_dict = {grade: idx for idx, grade in enumerate(['A', 'B', 'C', 'D', 'E', 'F', 'G'])}
term_dict = {term: idx for idx, term in enumerate(['36 months', '60 months'])}
# configuration for the data generators: data locations, batching, and image-size cap
train_image_directory = 'imagenette2/train'
test_image_directory = 'imagenette2/val'
# Imagenette contains ten classes
n_classes = 10
# images per training batch
batch_size = 16
# longest allowed image side, in pixels
max_dimension = 512
#create generators for training and generating
#data generator class; yields batches of data for training/testing
# FIX: the pasted snippet lost all indentation, leaving the class body at column 0
# (a syntax error); structure restored with behavior unchanged.
class ImageGenerator():
    """Holds the configuration used when yielding image batches.

    NOTE(review): only the constructor is visible in this snippet; the
    batch-yielding methods presumably live elsewhere in the original gist.
    """

    def __init__(self, directory, batch_size=16, shuffle=False, max_dimension=None):
        """Record batching settings.

        directory     -- root folder containing the images
        batch_size    -- number of images per yielded batch
        shuffle       -- whether to shuffle ordering each pass
        max_dimension -- cap on image side length, or None for no cap
        """
        # NOTE(review): attribute is `directories` (plural) although the
        # parameter is singular — kept as-is for compatibility with callers.
        self.directories = directory
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.max_dimension = max_dimension
#set up MobileNet with GlobalMaxPooling and an unspecified input resolution
# (None, None, 3) accepts RGB images of any height/width
inputs = Input(shape=(None,None,3))
# alpha=0.35 selects the narrowest MobileNetV2 variant; ImageNet weights, no classifier top
net = MobileNetV2(include_top=False, alpha=0.35, weights='imagenet', input_tensor=inputs, classes=n_classes)
# global max pooling collapses the spatial dimensions, so any input resolution works
net = GlobalMaxPooling2D()(net.output)
# softmax head over the n_classes categories
outputs = Dense(n_classes,activation='softmax')(net)
model = Model(inputs=inputs,outputs=outputs)
# sparse loss/metric: labels are integer class ids rather than one-hot vectors
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy',metrics=['sparse_categorical_accuracy'])
#imports
import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2,preprocess_input
from tensorflow.keras.layers import Input,GlobalMaxPooling2D,Dense
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array,load_img
import numpy as np
@emuccino
emuccino / DeepDream.py
Created September 10, 2019 18:38
DeepDream
from keras.models import Model
from keras.layers import Dense, Input, Reshape
from keras.regularizers import Regularizer
from keras.utils.generic_utils import get_custom_objects
from keras import backend as K
from keras.applications.inception_v3 import InceptionV3
from keras.initializers import RandomUniform
from keras.preprocessing.image import array_to_img, save_img
import numpy as np
@emuccino
emuccino / generate_adversaries.py
Last active May 24, 2022 13:22
generate_adversaries
#custom activation function for keeping adversarial pixel values between 0.0 and 1.0
# FIX: the pasted snippet lost indentation (return at column 0 is a syntax
# error); function body restored with behavior unchanged.
def clip(x):
    """Clamp tensor values into the valid pixel range [0.0, 1.0]."""
    return K.clip(x, 0.0, 1.0)
#custom loss function for non-targeted misclassification
# FIX: restored the indentation lost in the paste (return at column 0 was a
# syntax error); behavior unchanged.
def negative_categorical_crossentropy(yTrue,yPred):
    """Negated categorical cross-entropy: minimizing this loss pushes the
    model's prediction AWAY from the true class (non-targeted adversary)."""
    return 0.0 - K.categorical_crossentropy(yTrue,yPred)
#add custom objects to dictionary
# Registers `clip` under the name 'clip' so layers can be built with activation='clip'.
# NOTE(review): `Activation` is not imported in any snippet visible here — confirm
# it is imported (from keras.layers) in the original gist.
get_custom_objects().update({'clip': Activation(clip)})
@emuccino
emuccino / select_image.py
Created August 14, 2019 16:03
select_image
#select image to create an adversarial example from
# slice keeps the batch dimension: shape (1, ...) rather than a single sample
img = x_train[0:1]
# display the 28x28 MNIST digit with a fixed intensity scale
plt.imshow(img.reshape((28,28)),vmin=0., vmax=1.)
plt.show()
#verify accurate classification
prediction = mnist_model.predict(img)[0]
print(prediction)
#applying random noise does not fool the classifier
# noise is quantized to 1/255 steps so it matches representable 8-bit pixel values
quantized_noise = np.round(np.random.normal(loc=0.0, scale=0.3, size=img.shape) * 255.) / 255.