-
-
Save HTLife/ca0a7d48bd9a3192cf8d3c8b1347e8dd to your computer and use it in GitHub Desktop.
def defModel():
    """Build and compile a CNN+LSTM model for sequence regression.

    A stack of TimeDistributed Conv2D layers extracts features from each
    frame independently (frames of shape 540x960 with 2 channels); strided
    convolutions perform the spatial downsampling instead of pooling. The
    per-frame feature maps are flattened and fed through two LSTM layers,
    and the final hidden state is mapped to a 3-dimensional output.

    NOTE: the model expects 5-D input of shape
    (batch_size, time_steps, 540, 960, 2). Feeding a plain 4-D image batch
    (e.g. straight from a Keras ImageDataGenerator) raises
    "expected time_distributed_1_input to have 5 dimensions".

    Returns:
        The compiled Sequential model (mean-squared-error loss, Adam
        optimizer).
    """
    model = Sequential()

    # Per-frame convolutional feature extractor. time_steps is left as
    # None so sequences of any length are accepted.
    model.add(TimeDistributed(
        Conv2D(32, (7, 7), padding='same', strides=2),
        input_shape=(None, 540, 960, 2)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(64, (5, 5), padding='same', strides=2)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(128, (5, 5), padding='same', strides=2)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same')))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same', strides=2)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same')))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same', strides=2)))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same')))
    model.add(Activation('relu'))
    model.add(TimeDistributed(Conv2D(512, (3, 3), padding='same', strides=2)))
    model.add(Activation('relu'))

    # Flatten each frame's feature map, then aggregate over time.
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(512, return_sequences=True))
    model.add(LSTM(512))

    # Regression head: 3 continuous outputs.
    model.add(Dense(128))
    model.add(Dense(3))
    model.compile(loss='mean_squared_error', optimizer='adam')

    # Save architecture diagrams (requires pydot and graphviz).
    plot_model(model, to_file='model/model.png')
    plot_model(model, to_file='model/model_detail.png', show_shapes=True)
    return model
hello,
I am trying to implement a CNN+LSTM; the code for my model is almost the same, using TimeDistributed layers. The model is compiling fine. I have used the Keras image data generators for the image inputs, but it gives an error on fit_generator.
Error is:
ValueError: Error when checking input: expected time_distributed_1_input to have 5 dimensions, but got array with shape (4, 64, 64, 3)
I have input images of size 64x64x3 and want to process 4 sample images at once. How is the 5th dimension supposed to be included?
Any help would be appreciated
Thank you!
hello,
I am trying to implement a CNN+LSTM; the code for my model is almost the same, using TimeDistributed layers. The model is compiling fine. I have used the Keras image data generators for the image inputs, but it gives an error on fit_generator.
Error is: ValueError: Error when checking input: expected time_distributed_1_input to have 5 dimensions, but got array with shape (4, 64, 64, 3)
I have input images of size 64x64x3 and want to process 4 sample images at once. How is the 5th dimension supposed to be included?
Any help would be appreciated
Thank you!
Your input shape does not match the one defined in the code: input_shape=(None, 540, 960, 2)
.
I believe the input shape should be (batch_size, time_steps, rows, cols, channels), in which case the model will proceed with the training process.
Can I ask how the data should be organized? I'm getting an error saying 5 dimensions are expected while only 4 are being provided.