KentaKudo / a.go (last active May 4, 2018 17:20): Try out go plugin package

package main

import (
	"fmt"
)

// F is to be loaded and called from main().
// Build as a shared object first: go build -buildmode=plugin -o a.so a.go
func F() {
	fmt.Println(`This is a function in "a.go"`)
}
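
For context, a minimal sketch of the loading side, assuming the plugin was built as a.so with the command above (plugin.Open and Lookup are the standard library calls; the file names are assumptions):

// main.go
package main

import "plugin"

func main() {
	p, err := plugin.Open("a.so")
	if err != nil {
		panic(err)
	}
	f, err := p.Lookup("F")
	if err != nil {
		panic(err)
	}
	// Lookup returns the function value for an exported function;
	// assert it to its concrete type and call it.
	f.(func())()
}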

# -*- coding: utf-8 -*-
import numpy as np
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences

def load_dataset(file_path):
    tokenizer = Tokenizer(filters="")
    texts = []
    # Assumed continuation: one sentence per line in the input file.
    with open(file_path) as f:
        texts = [line.strip() for line in f]
    tokenizer.fit_on_texts(texts)
    return pad_sequences(tokenizer.texts_to_sequences(texts)), tokenizer
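
As a quick illustration of what Tokenizer(filters="") and pad_sequences do here (a standalone sketch; the sentences are made up):

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

tok = Tokenizer(filters="")  # empty filters: punctuation stays part of the tokens
tok.fit_on_texts(["i am a cat .", "i have no name yet ."])
seqs = tok.texts_to_sequences(["i am a cat .", "i have no name yet ."])
print(pad_sequences(seqs))   # zero-pads the shorter row to the longer length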

# -*- coding: utf-8 -*-
import re

f = open('examples.utf', 'r')
f_j = open('tanaka_corpus_j.txt', 'w')
f_e = open('tanaka_corpus_e.txt', 'w')

cnt = 0
# Assumed continuation: "A:" lines hold "<japanese>\t<english>#ID=...".
for line in f:
    m = re.match(r'A: (.+)\t(.+)#ID=', line)
    if m:
        f_j.write(m.group(1) + '\n')
        f_e.write(m.group(2) + '\n')
        cnt += 1
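
A sample "A:" line from the Tanaka Corpus shows what the regex above has to match (the split mirrors the loop above):

import re

line = "A: ムーリエルは２０歳になりました。\tMuiriel is 20 now.#ID=1282_4707"
m = re.match(r'A: (.+)\t(.+)#ID=', line)
print(m.group(1))  # Japanese half -> tanaka_corpus_j.txt
print(m.group(2))  # English half  -> tanaka_corpus_e.txt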

def simple_net(shape):
    from keras.models import Model
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input, BatchNormalization, Dropout
    from keras.regularizers import l2

    # The layers of Convolution → Convolution → Pooling
    def ccp(filters, kernel_size=(3, 3), weight_decay=1e-4, dropout=0.2):
        def _ccp(x):
            x = Conv2D(filters, kernel_size, padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(x)
            x = BatchNormalization()(x)
            x = Conv2D(filters, kernel_size, padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(x)
            x = BatchNormalization()(x)
            x = MaxPooling2D(pool_size=(2, 2))(x)
            x = Dropout(dropout)(x)
            return x
        return _ccp

    # Assumed continuation: two blocks and a softmax head.
    inputs = Input(shape=shape)
    x = ccp(64)(ccp(32)(inputs))
    outputs = Dense(10, activation='softmax')(Flatten()(x))
    return Model(inputs=inputs, outputs=outputs)
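
Any of the nets in this listing can be used the same way; a minimal sketch, assuming CIFAR-10-shaped input as loaded further down:

model = simple_net((32, 32, 3))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()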

def res_net(shape):
    from keras.models import Model
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input, BatchNormalization, Add, Activation, GlobalAveragePooling2D
    from keras.regularizers import l2

    def resblock(filters, kernel_size=(3, 3), increase=False):
        strides = (2, 2) if increase else (1, 1)
        def _res_block(x):
            x_ = Conv2D(filters, kernel_size, strides=strides, padding='same', kernel_regularizer=l2(1e-4))(x)
            x_ = Conv2D(filters, kernel_size, padding='same', kernel_regularizer=l2(1e-4))(Activation('relu')(BatchNormalization()(x_)))
            if increase:  # assumed continuation: a 1x1 conv projects the shortcut when downsampling
                x = Conv2D(filters, (1, 1), strides=strides)(x)
            return Activation('relu')(BatchNormalization()(Add()([x, x_])))
        return _res_block

    inputs = Input(shape=shape)
    x = resblock(64, increase=True)(resblock(32)(Conv2D(32, (3, 3), padding='same')(inputs)))
    outputs = Dense(10, activation='softmax')(GlobalAveragePooling2D()(x))
    return Model(inputs=inputs, outputs=outputs)

KentaKudo / vgg.py (last active February 6, 2018 14:22)

def vgg(shape):
    from keras.models import Model
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input, BatchNormalization, Dropout
    from keras.regularizers import l2

    weight_decay = 1e-4
    inputs = Input(shape=shape)
    x = Conv2D(64, (3, 3), padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(inputs)
    x = BatchNormalization()(x)
    # Assumed continuation: a paired 3x3 conv, pooling, and a dense head.
    x = Conv2D(64, (3, 3), padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.4)(Dense(512, activation='relu')(Flatten()(x)))
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)

def alex_net(shape):
    from keras.models import Model
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input, BatchNormalization, Dropout
    from keras.regularizers import l2

    weight_decay = 1e-4
    inputs = Input(shape=shape)
    x = Conv2D(96, (5, 5), padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(inputs)
    x = BatchNormalization()(x)
    # Assumed continuation: pool, a second conv stage, and a dense head.
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(256, (5, 5), padding='same', kernel_regularizer=l2(weight_decay), activation='relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.5)(Dense(512, activation='relu')(Flatten()(x)))
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)

def le_net(shape):
    from keras.models import Model
    from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input

    inputs = Input(shape=shape)
    x = Conv2D(6, (5, 5), padding='same', kernel_initializer='he_normal', activation='relu')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(16, (5, 5), padding='same', kernel_initializer='he_normal', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    # Assumed continuation: LeNet's classic 120-84-10 fully connected head.
    x = Flatten()(x)
    x = Dense(120, activation='relu')(x)
    x = Dense(84, activation='relu')(x)
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)

import numpy as np
from keras.datasets import cifar10
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

(train_X, train_y), (test_X, test_y) = cifar10.load_data()
train_X = train_X.astype('float32') / 255
test_X = test_X.astype('float32') / 255
train_y = to_categorical(train_y, 10)
test_y = to_categorical(test_y, 10)
# Assumed continuation: carve a validation split out of the training set.
train_X, valid_X, train_y, valid_y = train_test_split(train_X, train_y, test_size=0.2)
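
Tying the pieces together, a sketch of training one of the nets above on this data (batch size and epoch count are placeholders):

model = le_net(train_X.shape[1:])  # CIFAR-10 input shape (32, 32, 3)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_X, train_y, batch_size=128, epochs=10, validation_data=(valid_X, valid_y))
print(model.evaluate(test_X, test_y))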

from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=1000,
                                                         test_split=0.2)

tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
# Assumed continuation: one-hot encode the 46 Reuters topic labels.
y_train = to_categorical(y_train, 46)
y_test = to_categorical(y_test, 46)
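
The Sequential and Dense imports suggest a bag-of-words classifier; a minimal sketch consistent with them (layer sizes and hyperparameters are assumptions):

model = Sequential([Dense(512, activation='relu', input_shape=(1000,)),
                    Dense(46, activation='softmax')])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.1)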