Luciano StrikingLoo's GitHub gists
import numpy as np

np_im = pixels_from_path(file_path)  # load the image as a NumPy matrix of RGB values
pixels = vector_of_pixels(np_im)     # turn it into a single flat vector
np.unique(pixels).shape              # count the unique pixel values in the image
from dask_ml.cluster import KMeans
from PIL import Image
import numpy as np

# Given a fitted k-means model and a vector of pixels, return the new value for each pixel
# (the centroid of the cluster that pixel was assigned to).
def clustered_pixels(x_fit, pixels):
    labels = x_fit.predict(pixels).compute()  # cluster assignment for every pixel
    res = x_fit.cluster_centers_[labels]      # replace each pixel with its cluster's centroid
    return res
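# Not part of the original gists: a minimal sketch of how clustered_pixels could be used
# end-to-end, assuming we cluster the flat vector of channel values with dask_ml's KMeans.
# n_clusters, the chunk size, and the output file name are illustrative choices, not taken
# from the source; the original may have clustered full RGB triples instead (reshape(-1, 3)).
import dask.array as da
from PIL import Image

def compress_image(file_path, n_clusters=32):
    np_im = pixels_from_path(file_path)
    pixels = vector_of_pixels(np_im).reshape(-1, 1)       # one sample per channel value
    dask_pixels = da.from_array(pixels, chunks=(100_000, 1))
    kmeans = KMeans(n_clusters=n_clusters).fit(dask_pixels)
    new_pixels = clustered_pixels(kmeans, dask_pixels)    # every value snapped to its centroid
    compressed = new_pixels.reshape(np_im.shape).astype('uint8')
    Image.fromarray(compressed).save('compressed_cat.png')
    return compressed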
from PIL import Image
import numpy as np

def pixels_from_path(file_path):
    im = Image.open(file_path)
    np_im = np.array(im)  # matrix of pixel RGB values, shape (height, width, 3)
    return np_im

def vector_of_pixels(np_im):
    # flatten the (height, width, 3) matrix into a single 1-D vector of channel values,
    # matching the "turn it into a single vector" usage above
    return np_im.flatten()
print('# Fit model on training data')
history = model.fit(x_train,
                    x_train,  # we pass it the same input data as the desired output (autoencoder)
                    # If the model is taking forever to train, make this bigger.
                    # If it is taking forever to load for the first epoch, make this smaller.
                    batch_size=256,
                    epochs=10)
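# Not in the original snippets: a quick way to eyeball what the trained autoencoder
# produces, assuming x_train holds images scaled to [0, 1]. The file name is arbitrary.
from PIL import Image

reconstructions = model.predict(x_train[:10])
first_cat = (reconstructions[0] * 255).astype('uint8')   # back to 0-255 pixel values
Image.fromarray(first_cat).save('reconstructed_cat.png')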
customAdam = keras.optimizers.Adam(lr=0.001)  # you have no idea how many times I changed this number
# (older Keras accepts lr=; newer versions spell it learning_rate=)
model.compile(optimizer=customAdam,  # optimizer
              # loss function to minimize
              loss="mean_squared_error",
              # list of metrics to monitor
              metrics=["mean_squared_error"])
from tensorflow import keras
from tensorflow.keras import layers

total_pixels = img_size * img_size * 3  # 48 * 48 * 3 = 6912 values per image
translator_factor = 2
translator_layer_size = int(total_pixels / translator_factor)  # first compression: half the pixels
middle_factor = 2
middle_layer_size = int(translator_layer_size / middle_factor)  # bottleneck: a quarter of the pixels

inputs = keras.Input(shape=(img_size, img_size, 3), name='cat_image')
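# The architecture snippet above cuts off after the Input layer. What follows is a
# plausible completion of the fully connected autoencoder it sets up, assuming a
# Flatten -> Dense(encoder) -> Dense(bottleneck) -> Dense(decoder) -> Dense(output) stack;
# the layer names and activations are assumptions, not the author's exact code.
x = layers.Flatten(name='flattened_cat')(inputs)                                # image to vector
x = layers.Dense(translator_layer_size, activation='relu', name='encoder')(x)  # compress
x = layers.Dense(middle_layer_size, activation='relu', name='bottleneck')(x)   # smallest representation
x = layers.Dense(translator_layer_size, activation='relu', name='decoder')(x)  # expand back
# sigmoid keeps outputs in [0, 1]; swap for relu/linear depending on how the pixels are scaled
outputs = layers.Dense(total_pixels, activation='sigmoid', name='reconstructed_cat')(x)
outputs = layers.Reshape((img_size, img_size, 3))(outputs)                      # vector back to image
model = keras.Model(inputs=inputs, outputs=outputs, name='cat_autoencoder')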
# 10% of the data will automatically be used for validation
validation_size = 0.1
img_size = 48       # resize images to be 48x48
num_channels = 3    # RGB
sample_size = 8192  # we'll use 8192 pictures (2**13)

data = dataset.read_train_sets(train_path, img_size, ['cats'],
                               validation_size=validation_size,
                               sample_size=sample_size)
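# A hedged sketch of pulling the actual arrays out of what read_train_sets returns.
# The attribute names (data.train.images, data.valid.images) are assumptions based on the
# common dataset.py helper this call appears to come from; adjust them to whatever your
# helper exposes. x_train is what the autoencoder's fit call above consumes.
x_train = data.train.images   # shape (samples, img_size, img_size, 3), scaled to [0, 1]
x_val = data.valid.images
print(f"training on {x_train.shape[0]} images, validating on {x_val.shape[0]}")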
import xgboost as xgb

param = {'max_depth': 5,
         'eta': .1,               # learning rate
         'colsample_bytree': .75  # fraction of features sampled per tree
         }
num_round = 40
new_tree = xgb.train(param, dtrain, num_round)
# make predictions on the held-out set
new_preds = new_tree.predict(dtest)
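# Not from the original gists: a self-contained sketch of how the dtrain / dtest DMatrix
# objects used above could be built. The synthetic features and target are stand-ins for
# whatever data the original notebook was modeling.
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split

X = np.random.rand(500, 10)                               # placeholder feature matrix
y = X @ np.random.rand(10) + 0.1 * np.random.randn(500)   # placeholder continuous target
train_features, test_features, train_label, test_label = train_test_split(X, y, test_size=0.2)

dtrain = xgb.DMatrix(train_features, label=train_label)
dtest = xgb.DMatrix(test_features, label=test_label)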
import math

# Root mean squared error between the true labels and the predictions.
def msesqrt(std, test_label, preds):
    squared_errors = [diff * diff for diff in (test_label - preds)]
    return math.sqrt(sum(squared_errors) / len(preds))

msesqrt(std, test_label, first_preds)  # 4.02
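# For comparison (not in the original): the same RMSE via scikit-learn, applied to the
# re-trained tree's predictions. first_preds and std come from the earlier baseline run.
import numpy as np
from sklearn.metrics import mean_squared_error

rmse_new = np.sqrt(mean_squared_error(test_label, new_preds))
print(f"baseline RMSE: {msesqrt(std, test_label, first_preds):.2f}, new RMSE: {rmse_new:.2f}")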