@TakuTsuzuki
Last active November 17, 2015 08:51
Neuro_Astrocyte_classifier_in_chainer
import math
import chainer
import chainer.functions as F
import numpy as np
class CNN2(chainer.FunctionSet):

    """Two conv layers + softmax CNN model for neuron detection."""

    def __init__(self):
        w = math.sqrt(2)
        super(CNN2, self).__init__(
            conv1=F.Convolution2D(1, 24, 11, wscale=w, stride=5, dtype=np.float32),
            conv2=F.Convolution2D(24, 48, 5, wscale=w, stride=1, dtype=np.float32),
            ln4=F.Linear(2352, 2),
        )

    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(y_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        y = self.ln4(h)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

    def predict(self, x_data):
        x = chainer.Variable(x_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        y = self.ln4(h)
        return F.softmax(y)
class CNN2_F(chainer.FunctionSet):

    """Two conv layers + fully connected layer + softmax CNN model for neuron detection."""

    def __init__(self):
        w = math.sqrt(2)
        super(CNN2_F, self).__init__(
            conv1=F.Convolution2D(1, 24, 11, wscale=w, stride=5, dtype=np.float32),
            conv2=F.Convolution2D(24, 48, 5, wscale=w, stride=1, dtype=np.float32),
            ln3=F.Linear(2352, 1000),
            ln4=F.Linear(1000, 2),
        )

    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(y_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.dropout(F.relu(self.ln3(h)), train=train)
        y = self.ln4(h)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

    def predict(self, x_data):
        x = chainer.Variable(x_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.ln3(h))
        y = self.ln4(h)
        return F.softmax(y)
class CNN3(chainer.FunctionSet):

    """Three conv layers + softmax CNN model for neuron detection (3 classes)."""

    def __init__(self):
        w = math.sqrt(2)
        super(CNN3, self).__init__(
            conv1=F.Convolution2D(1, 24, 11, wscale=w, stride=5, dtype=np.float32),
            conv2=F.Convolution2D(24, 48, 5, wscale=w, stride=1, dtype=np.float32),
            conv3=F.Convolution2D(48, 96, 5, wscale=w, stride=1, dtype=np.float32),
            ln4=F.Linear(96, 3),
        )

    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(y_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        y = self.ln4(h)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

    def predict(self, x_data):
        x = chainer.Variable(x_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        y = self.ln4(h)
        return F.softmax(y)
class CNN4(chainer.FunctionSet):

    """Four conv layers + softmax CNN model for neuron detection."""

    def __init__(self):
        w = math.sqrt(2)
        super(CNN4, self).__init__(
            conv1=F.Convolution2D(1, 24, 11, wscale=w, stride=3, dtype=np.float32),
            conv2=F.Convolution2D(24, 48, 5, wscale=w, stride=1, dtype=np.float32),
            conv3=F.Convolution2D(48, 96, 5, wscale=w, stride=1, dtype=np.float32),
            conv4=F.Convolution2D(96, 192, 5, wscale=w, stride=1, dtype=np.float32),
            ln4=F.Linear(192, 2),
        )

    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(y_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv4(h))
        y = self.ln4(h)
        return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

    def predict(self, x_data):
        x = chainer.Variable(x_data)
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(h, ksize=3, stride=2)
        h = F.relu(self.conv4(h))
        y = self.ln4(h)
        return F.softmax(y)
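# A minimal sanity-check sketch for the ln4 input sizes above. It assumes
# 200x200 grayscale crops (the size produced by the resize step later in this
# gist) and Chainer's default cover_all=True for max_pooling_2d. The helper
# functions below are illustrative only, not part of the Chainer API.


def conv_out(size, ksize, stride):
    # Convolution2D output size with no padding: floor((in - k) / s) + 1
    return (size - ksize) // stride + 1


def pool_out(size, ksize, stride):
    # max_pooling_2d output size with cover_all=True: ceil((in - k) / s) + 1
    return -(-(size - ksize) // stride) + 1


s = pool_out(conv_out(200, 11, 5), 3, 2)   # conv1 + pool -> 19
s = pool_out(conv_out(s, 5, 1), 3, 2)      # conv2 + pool -> 7
print(s * s * 48)                          # 2352 = ln4 input of CNN2 / CNN2_F
s = pool_out(conv_out(s, 5, 1), 3, 2)      # conv3 + pool -> 1
print(s * s * 96)                          # 96 = ln4 input of CNN3
# The same arithmetic with conv1 stride 3 and no pooling after conv4
# reproduces 192 for CNN4.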
import os
import argparse
import numpy as np
import six
import six.moves.cPickle as pickle
import matplotlib.pyplot as plt
import chainer
import chainer.functions as F
from chainer import cuda
from chainer import optimizers
from chainer import computational_graph as c
# In[2]:
get_ipython().magic(u'matplotlib inline')
# ###define parameters
# parser = argparse.ArgumentParser(description='cnn for neuro classification')
# parser.add_argument('--gpu', '-g', default=-1, type=int,
#                     help='GPU ID (negative value indicates CPU)')
# parser.add_argument('--arch', '-a', default='cnn3')
# parser.add_argument('--batchs', '-B', default=30, type=int,
#                     help='learning minibatch size')
# parser.add_argument('--epoch', '-E', default=20, type=int)
# parser.add_argument('--out', '-o', default='model',
#                     help="Path to save model")
# args = parser.parse_args()
# In[3]:
n_epoch = 300
batchsize = 100
gpu_flag = 0
N = 1500  # size of training data
# ###Get image dataset
# In[5]:
dpath=os.path.abspath("")
foldername = "/dataset"
x_data = np.load(dpath+foldername+"/x_data1.npy")
t_data = np.load(dpath+foldername+"/t_data1.npy")
mean = np.load(dpath+foldername+"/mean.npy")
#x_data = x_data - mean[:,:,:,np.newaxis]
x_data = x_data.transpose(3,0,1,2).astype(np.float32)
t_data = t_data.astype(np.int32)
print x_data.shape
print t_data.shape
print len(x_data)
N_test = len(x_data)-N
print N_test
# ###Separate data into train and test
# In[6]:
#shuffle the data
shuffle = np.random.permutation(len(t_data))
x_data = np.asarray(x_data[shuffle,:,:,:])
t_data = np.asarray(t_data[shuffle])
# In[7]:
#separate data
x_train, x_test = np.split(x_data,[N],axis=0)
t_train, t_test = np.split(t_data,[N])
print x_train.shape, t_train.shape
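# Optional sanity check (a minimal sketch): the labels are 0/1/2 as assigned in
# the data-prep script later in this gist, so np.bincount shows whether the
# shuffled split keeps all classes in both the training and test sets.
print np.bincount(t_train)
print np.bincount(t_test)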
# ###Prepare model
# In[9]:
import cnn
model = cnn.CNN3()
# ###GPU setup
# In[10]:
if gpu_flag >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if gpu_flag >= 0 else np

if gpu_flag >= 0:
    cuda.get_device(gpu_flag).use()
    model.to_gpu()
# ###Setup optimizer(Adam)
# In[11]:
optimizer = optimizers.Adam()
optimizer.setup(model)
# ###Learning loop
# In[14]:
train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in six.moves.range(1, n_epoch + 1):
    print 'epoch: ', epoch

    # training
    perm = np.random.permutation(N)
    sum_acc = 0
    sum_loss = 0
    for i in six.moves.range(0, N, batchsize):
        x_batch = xp.asarray(x_train[perm[i:i + batchsize]])
        t_batch = xp.asarray(t_train[perm[i:i + batchsize]])
        optimizer.zero_grads()
        loss, acc = model.forward(x_batch, t_batch)
        loss.backward()
        optimizer.update()
        # if epoch == 1 and i == 0:
        #     with open("graph.dot", "w") as o:
        #         o.write(c.build_computational_graph((loss, )).dump())
        #     with open("graph.wo_split.dot", "w") as o:
        #         g = c.build_computational_graph((loss, ), remove_split=True)
        #         o.write(g.dump())
        #     print "graph generated"
        sum_loss += float(loss.data) * len(t_batch)
        sum_acc += float(acc.data) * len(t_batch)
    train_loss.append([epoch, sum_loss / N])
    train_acc.append([epoch, sum_acc / N])
    print "train mean loss={}, accuracy={}".format(sum_loss / N, sum_acc / N)

    # evaluation
    sum_acc = 0
    sum_loss = 0
    for i in six.moves.range(0, N_test, batchsize):
        x_batch = xp.asarray(x_test[i:i + batchsize])
        t_batch = xp.asarray(t_test[i:i + batchsize])
        loss, acc = model.forward(x_batch, t_batch, train=False)
        sum_loss += float(loss.data) * len(t_batch)
        sum_acc += float(acc.data) * len(t_batch)
    test_loss.append([epoch, sum_loss / N_test])
    test_acc.append([epoch, sum_acc / N_test])
    print "test mean loss={}, accuracy={}".format(sum_loss / N_test, sum_acc / N_test)

train_loss = np.asarray(train_loss)
train_acc = np.asarray(train_acc)
test_loss = np.asarray(test_loss)
test_acc = np.asarray(test_acc)
# ###Plot&save graph
# In[13]:
fig, ax1 = plt.subplots()
ax1.plot(train_loss[:, 0], train_loss[:, 1], label='training loss')
ax1.plot(test_loss[:, 0], test_loss[:, 1], label='test loss')
ax1.set_xlim([1, len(train_loss)])
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss')
ax2 = ax1.twinx()
ax2.plot(train_acc[:, 0], train_acc[:, 1], label='training accuracy', c='r')
ax2.plot(test_acc[:, 0], test_acc[:, 1], label='test accuracy', c='c')
ax2.set_xlim([1, len(train_loss)])
ax2.set_ylabel('accuracy')
ax1.legend(bbox_to_anchor=(0.25, -0.1), loc=9)
ax2.legend(bbox_to_anchor=(0.75, -0.1), loc=9)
plt.title("Convolution3 + Logistic regression, 3 class")
plt.savefig(dpath+"/figs/cnn3_3class", bbox_inches='tight')
plt.show()
# ###Save final model
# In[11]:
pickle.dump(model, open('', 'wb'),-1)
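# ###Predict with the trained model (a minimal usage sketch)
# predict() returns softmax class probabilities; the same call works on a model
# reloaded with pickle.load() (the save path above is left blank, so the file
# name used there is up to the user).
probs = model.predict(xp.asarray(x_test[:batchsize])).data
print probs.shape                 # (batchsize, 3) for the 3-class CNN3 model
print probs.argmax(axis=1)        # predicted class labels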
# In[ ]:
imagedir = getDirectory("Choose a Directory");
savedir = "/Users/tsuzuki/PycharmProjects/cell_detection/OkadaLab/dataset";
nfolder = 12;

// crop each neuron ROI and save it plus three 90-degree rotations as PNG
j = 0;
for (k = 1; k <= nfolder; k++) {
    run("ROI Manager...");
    roiManager("Open", imagedir+"/"+k+"/neuron.zip");
    nroi = roiManager("count");
    for (i = 0; i <= nroi-1; i++) {
        open(imagedir+"/"+k+"/img_000000000_Default_000.tif");
        roiManager("Select", i);
        run("Crop");
        saveAs("PNG", savedir+"/neuron/"+j+".png");
        j++;
        for (l = 0; l <= 2; l++) {
            run("Rotate 90 Degrees Left");
            saveAs("PNG", savedir+"/neuron/"+j+".png");
            j++;
        }
        close();
        wait(300);
        //wait(2000);
    }
    wait(1000);
    run("Close");
}
// repeat the extraction for the "grea" ROIs
j = 0;
for (k = 1; k <= nfolder; k++) {
    run("ROI Manager...");
    roiManager("Open", imagedir+"/"+k+"/grea.zip");
    nroi = roiManager("count");
    for (i = 0; i <= nroi-1; i++) {
        open(imagedir+"/"+k+"/img_000000000_Default_000.tif");
        roiManager("Select", i);
        run("Crop");
        saveAs("PNG", savedir+"/grea/"+j+".png");
        j++;
        for (l = 0; l <= 2; l++) {
            run("Rotate 90 Degrees Left");
            saveAs("PNG", savedir+"/grea/"+j+".png");
            j++;
        }
        close();
        wait(300);
        //wait(2000);
    }
    wait(1000);
    run("Close");
}
// repeat the extraction for the background ("bgd") ROIs
j = 0;
for (k = 1; k <= nfolder; k++) {
    run("ROI Manager...");
    roiManager("Open", imagedir+"/"+k+"/bgd.zip");
    nroi = roiManager("count");
    for (i = 0; i <= nroi-1; i++) {
        open(imagedir+"/"+k+"/img_000000000_Default_000.tif");
        roiManager("Select", i);
        run("Crop");
        saveAs("PNG", savedir+"/bgd/"+j+".png");
        j++;
        for (l = 0; l <= 2; l++) {
            run("Rotate 90 Degrees Left");
            saveAs("PNG", savedir+"/bgd/"+j+".png");
            j++;
        }
        close();
        wait(300);
        //wait(2000);
    }
    wait(1000);
    run("Close");
}
import os
import numpy as np
import scipy as sp
import glob
import cv2
import matplotlib.pyplot as plt
# In[2]:
get_ipython().magic(u'matplotlib inline')
# In[3]:
dpath=os.path.abspath("")
dpath
# ###create data dictionary
# In[4]:
#folder name
folder = "/dataset"
imformat = ".png"
# In[5]:
neuron=glob.glob(dpath+folder+"/neuron/*"+imformat)
grea =glob.glob(dpath+folder+"/grea/*"+imformat)
bgd = glob.glob(dpath+folder+"/bgd/*"+imformat)
# In[6]:
dict = {}
dict["neuron"]=neuron
dict["grea"] = grea
dict["bgd"] = bgd
print dict.keys()
# In[8]:
print len(dict["neuron"])
print len(dict["grea"])
print len(dict["bgd"])
# In[68]:
impath=dpath+folder
print impath
# ###resize image
# In[69]:
for folder in dict.keys():
    for i in xrange(len(dict[folder])):
        path = dict[folder][i]
        #path = (impath+"/{folder}/{im}".format(**{"folder": folder, "im": dict[folder][i]}))
        img = cv2.imread(path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        img1 = cv2.resize(img, (200, 200))
        cv2.imwrite(path, img1)
# ###create data matrix
# In[70]:
# stack all crops depth-wise into (H, W, n_images); the folder order is
# dict.keys(), the same order used to build the labels below
x_data = np.array([]).astype(np.float32)
for folder in dict.keys():
    for i in xrange(len(dict[folder])):
        path = dict[folder][i]
        if i == 0 and folder == dict.keys()[0]:
            x_data = plt.imread(path)
        else:
            image = plt.imread(path)
            x_data = np.dstack((x_data, image))
        #print x_data.shape
x_data = x_data.astype(np.float32)
x_data.shape
# ###create labels
# In[71]:
# label i corresponds to the i-th key of dict (same order as the stacking above)
t_data = np.array([]).astype(np.int32)
for label in xrange(len(dict.keys())):
    t_data = np.append(t_data, (np.ones(len(dict[dict.keys()[label]])).astype(np.int32)) * label)
# ### reshape data for chainer format (normalize pixels to 0-1, calculate mean)
# In[72]:
x_data_n = x_data[np.newaxis,:,:,:]/255.0
mean = np.sum(x_data_n, axis=3)/len(t_data)
# In[73]:
print x_data.shape
print x_data_n.shape
print t_data.shape
print mean.shape
# In[74]:
x_data_p = x_data_n - mean[:, :, :, np.newaxis]
x_data_p.shape
# ###Save data as .npy
# In[75]:
np.save(impath+"/x_data1.npy",x_data_n)
np.save(impath+"/t_data1.npy",t_data)
np.save(impath+"/mean.npy",mean)
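# ###Shape check before training (a minimal sketch)
# The arrays saved above are x_data_n with shape (1, 200, 200, n_images) and
# t_data with shape (n_images,). The training script in this gist transposes x
# into the (N, channel, height, width) layout that Chainer's Convolution2D
# expects; this just previews that step with the variables defined above.
x_chainer = x_data_n.transpose(3, 0, 1, 2).astype(np.float32)
print x_chainer.shape   # (n_images, 1, 200, 200)
print t_data.shape      # (n_images,)
assert len(x_chainer) == len(t_data)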
# ###Load .npy files
# In[49]:
hoge = np.load(impath+"/x_data.npy")
fuga = np.load(impath+"/t_data.npy")
# In[50]:
print hoge.shape
print fuga.shape
# In[51]:
hoge = hoge[np.newaxis,:,:,:]
print hoge.shape