import os
import glob

import numpy as np
from sklearn.preprocessing import MinMaxScaler

# train_dir, validation_dir and NO_OF_FEATURES are assumed to be defined earlier.
training_files = glob.glob(os.path.join(train_dir, '*'))
validation_files = glob.glob(os.path.join(validation_dir, '*'))

# Declare constants like BATCH_SIZE and NO_OF_EPOCHS.
BATCH_SIZE = 32
NO_OF_EPOCHS = 3

# Let's construct one big numpy array from all the training files.
X_train = np.empty((1, NO_OF_FEATURES))
for train_file in training_files:
    file_np = np.load(train_file)
    X_train = np.vstack((X_train, file_np))
# Drop the first row, which is the empty placeholder and of no use.
X_train = X_train[1:, :]

# Let's construct one big numpy array from all the validation files.
X_validation = np.empty((1, NO_OF_FEATURES))
for validation_file in validation_files:
    file_np = np.load(validation_file)
    X_validation = np.vstack((X_validation, file_np))
# Drop the first row, which is the empty placeholder and of no use.
X_validation = X_validation[1:, :]

# Time to fit the MinMaxScaler to our data.
# We are training an autoencoder, so we only need inputs (X), not labels (y).
min_max_scaler = MinMaxScaler()
# Fit the scaler on the training data only.
min_max_scaler.fit(X_train)
# Scale both the training data and the validation data with the fitted scaler.
X_train_scaled = min_max_scaler.transform(X_train)
X_validation_scaled = min_max_scaler.transform(X_validation)
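
# --- Illustrative sketch (not part of the original gist) ---
# The gist stops after scaling, so the model below is an assumption: a minimal
# Keras autoencoder showing how X_train_scaled, X_validation_scaled, BATCH_SIZE
# and NO_OF_EPOCHS could be wired together. The architecture and layer sizes
# are placeholders, not the author's actual model.
import tensorflow as tf

autoencoder = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(NO_OF_FEATURES,)),
    tf.keras.layers.Dense(64, activation='relu'),                 # encoder (assumed width)
    tf.keras.layers.Dense(NO_OF_FEATURES, activation='sigmoid'),  # decoder back to input size
])
autoencoder.compile(optimizer='adam', loss='mse')

# For an autoencoder the scaled inputs double as the targets.
autoencoder.fit(
    X_train_scaled, X_train_scaled,
    batch_size=BATCH_SIZE,
    epochs=NO_OF_EPOCHS,
    validation_data=(X_validation_scaled, X_validation_scaled),
)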