I hereby claim:
- I am coreyauger on github.
- I am coreyauger (https://keybase.io/coreyauger) on keybase.
- I have a public key ASDY8Z2DcU9q-n5nykzxsJyAq0fQ61GvnnTFvQKF8j7j6wo
To claim this, I am signing this object:
#include <SDL2/SDL.h>
#include <iostream>
using namespace std;
const int SCREEN_WIDTH = 800;
const int SCREEN_HEIGHT = 600;
int main( int argc, char* args[] ){
    SDL_Window* window = NULL;
    SDL_Surface* screenSurface = NULL;
    // Assumed completion of the truncated preview: init video, create a window, grab its surface.
    if( SDL_Init( SDL_INIT_VIDEO ) < 0 ){ cout << "SDL_Init failed: " << SDL_GetError() << endl; return 1; }
    window = SDL_CreateWindow( "SDL2", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, SCREEN_WIDTH, SCREEN_HEIGHT, SDL_WINDOW_SHOWN );
    screenSurface = SDL_GetWindowSurface( window );
    SDL_Delay( 2000 );  // keep the window up briefly
    SDL_DestroyWindow( window );
    SDL_Quit();
    return 0;
}
android {
    compileSdkVersion 26
    buildToolsVersion "26.0.1"
    defaultConfig {
        if (buildAsApplication) {
            applicationId "io.surfkit.simple"
        }
        minSdkVersion 24
        targetSdkVersion 26
        versionCode 1
android {
    compileSdkVersion 26
    buildToolsVersion "26.0.1"
    defaultConfig {
        if (buildAsApplication) {
            applicationId "io.surfkit"
        }
        minSdkVersion 24
        targetSdkVersion 26
#include <exception>
#include <functional>
#define GL_GLEXT_PROTOTYPES 1
#include <SDL2/SDL.h>
#include <SDL2/SDL_opengles2.h>
// Shader sources (the preview cuts off here; a minimal GLES2 pass-through vertex shader is assumed)
const GLchar* vertexSource =
    "attribute vec4 position;                   \n"
    "void main()                                \n"
    "{                                          \n"
    "    gl_Position = vec4(position.xyz, 1.0); \n"
    "}                                          \n";
import numpy as np
import os
import dtdata as dt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential, Model   # Model is needed by the encoder code below
from keras.layers import Dense, Activation, Dropout, Input
from keras.regularizers import l1
from keras.optimizers import RMSprop, Adam
#################################################################################################
## TRAIN FUTURE ENCODER
#################################################################################################
x_train_future = data[hold_out:, 20:2420]
print("training on: " + str(x_train_future.shape))
input_future = Input(shape=(x_train_future.shape[1],))
encoded_future = Dense(encoding_dim, activation='relu')(input_future)
decoded_future = Dense(x_train_future.shape[1], activation='linear')(encoded_future)
autoencoder_future = Model(input_future, decoded_future)
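The preview stops once the model is constructed. A minimal sketch of the remaining compile/fit step, assuming `data`, `hold_out`, and `encoding_dim` are defined earlier in the script (optimizer, loss, epoch count, and the hold-out slice below are illustrative assumptions, not taken from the original):

# Assumed continuation: compile and train the future autoencoder on its own input.
# 'data' and 'hold_out' come from earlier in the script; epochs/batch_size are illustrative.
x_test_future = data[:hold_out, 20:2420]
autoencoder_future.compile(optimizer='adam', loss='mse')
autoencoder_future.fit(x_train_future, x_train_future,
                       epochs=50,
                       batch_size=256,
                       shuffle=True,
                       validation_data=(x_test_future, x_test_future))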
#################################################################################################
## TRAIN PAST ENCODER
#################################################################################################
x_train_past = data[hold_out:, 0:2400]
print("training past on: " + str(x_train_past.shape))
input_past = Input(shape=(x_train_past.shape[1],))
encoded_past = Dense(encoding_dim, activation='relu')(input_past)
decoded_past = Dense(x_train_past.shape[1], activation='linear')(encoded_past)
autoencoder_past = Model(input_past, decoded_past)
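The preview does not show how the two encoders are used afterwards. One plausible next step, sketched here purely as an assumption, is to wrap each encoding layer in its own Model and use it to reduce the wide input windows to encoding_dim-length feature vectors once the autoencoders have been fit:

# Assumed follow-on: expose the trained encoders as standalone models.
encoder_past = Model(input_past, encoded_past)
encoder_future = Model(input_future, encoded_future)
features_past = encoder_past.predict(x_train_past)       # (n_samples, encoding_dim)
features_future = encoder_future.predict(x_train_future) # (n_samples, encoding_dim)
print(features_past.shape, features_future.shape)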
import numpy as np
import os
import dtdata as dt
import matplotlib.pyplot as plt
import math
import random
import pprint as pp
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
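This preview ends at the imports. As a rough illustration of how the scaling, splitting, and plotting pieces fit together, here is a sketch using a stand-in array, since the dtdata loading call is not visible above:

# Illustrative only: 'X' stands in for whatever dtdata returns in the real script.
X = np.random.rand(1000, 2400)
X_scaled = MinMaxScaler().fit_transform(X)                              # scale each column to [0, 1]
x_train, x_test = train_test_split(X_scaled, test_size=0.2, random_state=42)
plt.plot(x_train[0])                                                    # inspect one scaled window
plt.title("sample input window")
plt.show()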