|
from keras.models import Sequential |
|
from keras.layers.core import Flatten, Dense, Dropout |
|
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D |
|
from keras.optimizers import SGD |
|
import cv2, numpy as np |
|
|
|
def VGG_19(weights_path=None):
    """Build the VGG-19 convnet (Keras 1.x API, Theano channel order).

    Architecture: five conv blocks of (2, 2, 4, 4, 4) 3x3 conv layers with
    (64, 128, 256, 256... ) filters, each block followed by 2x2 max-pooling,
    then three fully connected layers ending in a 1000-way softmax.

    Parameters
    ----------
    weights_path : str or None
        Optional path to an HDF5 weights file to load into the model.

    Returns
    -------
    keras.models.Sequential
        The (optionally weight-initialized) VGG-19 model; expects input
        of shape (3, 224, 224), i.e. channels-first RGB/BGR images.
    """
    model = Sequential()

    # (number of conv layers, filter count) for each of the five blocks.
    conv_blocks = [(2, 64), (2, 128), (4, 256), (4, 512), (4, 512)]

    first_layer = True
    for n_convs, n_filters in conv_blocks:
        for _ in range(n_convs):
            # Only the very first layer declares the input shape.
            if first_layer:
                model.add(ZeroPadding2D((1, 1), input_shape=(3, 224, 224)))
                first_layer = False
            else:
                model.add(ZeroPadding2D((1, 1)))
            model.add(Convolution2D(n_filters, 3, 3, activation='relu'))
        # Halve spatial resolution after each conv block.
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Classifier head: two dropout-regularized FC layers + softmax output.
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
|
|
|
if __name__ == "__main__":
    # Load and preprocess the test image: resize to the 224x224 input size,
    # then subtract per-channel mean pixel values. cv2.imread returns BGR,
    # so channel 0 is blue; these constants are presumably the training-set
    # channel means in BGR order — verify against the weights' provenance.
    im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
    im[:,:,0] -= 103.939
    im[:,:,1] -= 116.779
    im[:,:,2] -= 123.68
    # HWC -> CHW (channels-first, matching the model's (3, 224, 224) input),
    # then add a leading batch dimension.
    im = im.transpose((2,0,1))
    im = np.expand_dims(im, axis=0)

    # Test pretrained model
    model = VGG_19('vgg19_weights.h5')
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    out = model.predict(im)
    # print() call form works on both Python 2 and 3 (the original used
    # the Python 2-only print statement).
    print(np.argmax(out))
@morningsky Yes, of course — otherwise the shapes of the weights won't match.
As for the number of classes: you could change the shape of the last layer, skip loading pre-trained weights for it, and then fine-tune on your dataset.
If you change the input size instead, the shape of the first fully connected layer would also differ, so you would have to fine-tune all of the FC layers.