Training Keras model with tf.data
"""An example of how to use tf.Dataset in Keras Model""" | |
import tensorflow as tf # only work from tensorflow==1.9.0-rc1 and after | |
_EPOCHS = 5 | |
_NUM_CLASSES = 10 | |
_BATCH_SIZE = 128 | |
def training_pipeline(): | |
# ############# | |
# Load Dataset | |
# ############# | |
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() | |
training_set = tfdata_generator(x_train, y_train, is_training=True, batch_size=_BATCH_SIZE) | |
testing_set = tfdata_generator(x_test, y_test, is_training=False, batch_size=_BATCH_SIZE) | |
# ############# | |
# Train Model | |
# ############# | |
model = keras_model() # your keras model here | |
model.compile('adam', 'categorical_crossentropy', metrics=['acc']) | |
model.fit( | |
training_set.make_one_shot_iterator(), | |
steps_per_epoch=len(x_train) // _BATCH_SIZE, | |
epochs=_EPOCHS, | |
validation_data=testing_set.make_one_shot_iterator(), | |
validation_steps=len(x_test) // _BATCH_SIZE, | |
verbose = 1) | |
def tfdata_generator(images, labels, is_training, batch_size=128): | |
'''Construct a data generator using tf.Dataset''' | |
def preprocess_fn(image, label): | |
'''A transformation function to preprocess raw data | |
into trainable input. ''' | |
x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1)) | |
y = tf.one_hot(tf.cast(label, tf.uint8), _NUM_CLASSES) | |
return x, y | |
dataset = tf.data.Dataset.from_tensor_slices((images, labels)) | |
if is_training: | |
dataset = dataset.shuffle(1000) # depends on sample size | |
# Transform and batch data at the same time | |
dataset = dataset.apply(tf.contrib.data.map_and_batch( | |
preprocess_fn, batch_size, | |
num_parallel_batches=4, # cpu cores | |
drop_remainder=True if is_training else False)) | |
dataset = dataset.repeat() | |
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) | |
return dataset | |
def keras_model(): | |
from tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, Input | |
inputs = Input(shape=(28, 28, 1)) | |
x = Conv2D(32, (3, 3),activation='relu', padding='valid')(inputs) | |
x = MaxPool2D(pool_size=(2, 2))(x) | |
x = Conv2D(64, (3, 3), activation='relu')(x) | |
x = MaxPool2D(pool_size=(2, 2))(x) | |
x = Flatten()(x) | |
x = Dense(512, activation='relu')(x) | |
x = Dropout(0.5)(x) | |
outputs = Dense(_NUM_CLASSES, activation='softmax')(x) | |
return tf.keras.Model(inputs, outputs) | |
if __name__ == '__main__': | |
training_pipeline() |
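For orientation, each element the pipeline yields is an (images, labels) batch of shapes (128, 28, 28, 1) and (128, 10). A quick TF 1.x sanity check (a sketch, not part of the gist, reusing tfdata_generator from above):

import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
batches = tfdata_generator(x_train, y_train, is_training=True)

# Pull one batch in graph mode to inspect what the pipeline emits.
images, labels = batches.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    x, y = sess.run([images, labels])
    print(x.shape)  # (128, 28, 28, 1)
    print(y.shape)  # (128, 10)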
I ran into the delayed-training issue too, and saw a big speed improvement with tensorflow-gpu 1.13 on my machine when I call tf.enable_eager_execution() before running the code. tf 1.14 still has this issue. Has anyone found a solution?
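For anyone trying that workaround, a minimal sketch of where the call goes (TF 1.x only; it must run before any graph or dataset is built, and TF 2.x is eager by default so it has no tf.enable_eager_execution):

import tensorflow as tf

# Per the comment above: enable eager execution before building the
# model or the input pipeline.
tf.enable_eager_execution()

training_pipeline()  # the gist's entry point, defined above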
Replace tf.contrib.data with tf.data.experimental, remove the .make_one_shot_iterator() calls, and this runs on tf.__version__ '2.2.0' with Python 3.8.3.
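Concretely, the two changes above amount to something like this (a sketch reusing keras_model() from the gist; note tf.data.experimental.map_and_batch still exists in 2.2 but is deprecated in favour of dataset.map(...).batch(...)):

import tensorflow as tf  # 2.x

_EPOCHS, _NUM_CLASSES, _BATCH_SIZE = 5, 10, 128

def preprocess_fn(image, label):
    x = tf.reshape(tf.cast(image, tf.float32), (28, 28, 1))
    y = tf.one_hot(tf.cast(label, tf.uint8), _NUM_CLASSES)
    return x, y

def tfdata_generator(images, labels, is_training, batch_size=_BATCH_SIZE):
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    if is_training:
        dataset = dataset.shuffle(1000)
    # Change 1: tf.contrib.data -> tf.data.experimental
    dataset = dataset.apply(tf.data.experimental.map_and_batch(
        preprocess_fn, batch_size,
        num_parallel_batches=4,
        drop_remainder=is_training))
    return dataset.repeat().prefetch(tf.data.experimental.AUTOTUNE)

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
training_set = tfdata_generator(x_train, y_train, is_training=True)
testing_set = tfdata_generator(x_test, y_test, is_training=False)

model = keras_model()  # same architecture as in the gist above
model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
# Change 2: pass the datasets directly; no .make_one_shot_iterator()
model.fit(training_set,
          steps_per_epoch=len(x_train) // _BATCH_SIZE,
          epochs=_EPOCHS,
          validation_data=testing_set,
          validation_steps=len(x_test) // _BATCH_SIZE)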
Hello, sorry if this is not directly related to this code sample; it is the most closely related post I could find, and I would appreciate any help or reference.
I am trying to make tf.data.Dataset work with tf.keras via tf.estimator. I get a dimension error when pushing the train_input_fn batch to the Keras model (after converting it to an estimator). The code looks like this:
def train_input_fn(batch_size=1):
    """An input function for training"""
    print("train_input_fn: start function")
    train_dataset = tf.data.experimental.make_csv_dataset(
        CSV_PATH_TRAIN, batch_size=batch_size, label_name='label',
        select_columns=["sample", "label"])
    print('train_input_fn: finished make_csv_dataset')
    train_dataset = train_dataset.map(parse_features_vector)
    print("train_input_fn: finished the map with parse_features_vector")
    train_dataset = train_dataset.repeat().batch(batch_size)
    print("train_input_fn: finished batch size. train_dataset is %s ", train_dataset)
    return train_dataset

IMG_SHAPE = (160, 160, 3)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = False

# (construction of `model` on top of base_model omitted in the original post)
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.0001),
              loss='binary_crossentropy',
              metrics=['accuracy'])

estimator = tf.keras.estimator.model_to_estimator(keras_model=model, model_dir='./date')

# train_input_fn reads a CSV of images, resizes them and returns a dataset batch
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=20)
# eval_input_fn reads a CSV of images, resizes them and returns a dataset batch of one sample
eval_spec = tf.estimator.EvalSpec(eval_input_fn)

tf.estimator.train_and_evaluate(estimator, train_spec=train_spec, eval_spec=eval_spec)
the log is:
train_input_fn: finished batch size. train_dataset is %s <BatchDataset shapes: ({mobilenetv2_1.00_160_input: (None, 1, 160, 160, 3)}, (None, 1)), types: ({mobilenetv2_1.00_160_input: tf.float32}, tf.int32)>
and the error is:
ValueError: Input 0 of layer Conv1_pad is incompatible with the layer: expected ndim=4, found ndim=5. Full shape received: [None, 1, 160, 160, 3]
Many thanks for any help,
eilalan
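One note on the shapes in that log, for anyone hitting the same error: tf.data.experimental.make_csv_dataset already returns batched elements (it takes batch_size itself), so a later .batch(batch_size) call nests a second batch dimension, which is exactly how an ndim=5 input like [None, 1, 160, 160, 3] can arise. A minimal sketch of the effect on a toy dataset (not the poster's CSV pipeline):

import tensorflow as tf

# Eight fake 160x160x3 "images".
ds = tf.data.Dataset.from_tensor_slices(tf.zeros([8, 160, 160, 3]))

once = ds.batch(1)     # element shape: (1, 160, 160, 3)    -> ndim=4
twice = once.batch(1)  # element shape: (1, 1, 160, 160, 3) -> ndim=5

print(once.element_spec.shape)   # (1, 160, 160, 3)
print(twice.element_spec.shape)  # (1, 1, 160, 160, 3)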