# -*- coding: utf-8 -*-
"""
Created on Jul. 26
@author: jyoshida-sci
@input: some *.png files (microscopic images: background=white, grain=gray) in the same directory.
"""
import cv2
import glob
import os
import numpy as np
import SuperImposer as si

if __name__ == '__main__':
    files = glob.glob("*.png")
    #files.sort(key=os.path.getmtime)
    mysi = si.SuperImposer()
    for i, filename in enumerate(files):
        print filename
        img = cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
        mysi.AddImg(img)
        # MakeImg() returns (bigimg_dog, bigimg_org), in that order
        dst_dog, dst_org = mysi.MakeImg()
        cv2.imwrite('dog_{0}.jpg'.format(filename), dst_dog)
        cv2.imwrite('org_{0}.jpg'.format(filename), dst_org)
    cv2.destroyAllWindows()
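
The script above targets the OpenCV 2.x Python bindings (Python 2 print statement, cv2.CV_LOAD_IMAGE_GRAYSCALE). A minimal sketch of the same driver for Python 3 with OpenCV 3 or newer, assuming the SuperImposer module is otherwise unchanged (its SpreadCanvas would also need // integer division under Python 3):

# Sketch only: Python 3 / OpenCV >= 3 variant of the driver above.
# cv2.CV_LOAD_IMAGE_GRAYSCALE was renamed to cv2.IMREAD_GRAYSCALE in OpenCV 3.
import cv2
import glob
import SuperImposer as si

if __name__ == '__main__':
    mysi = si.SuperImposer()
    for filename in sorted(glob.glob("*.png")):
        print(filename)
        img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        mysi.AddImg(img)
        dst_dog, dst_org = mysi.MakeImg()  # (DoG composite, original composite)
        cv2.imwrite('dog_{0}.jpg'.format(filename), dst_dog)
        cv2.imwrite('org_{0}.jpg'.format(filename), dst_org)
    cv2.destroyAllWindows()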
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using SuperImposer;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.CPlusPlus;
using System.Diagnostics;

namespace SuperImposer
{
    class si_app
    {
        // Raw microtrack (one candidate per contour)
        public struct rawmicrotrack
        {
            public int ph;
            public int pv;
            public int ax;
            public int ay;
            public int cx;
            public int cy;
        }

        // Builds a binary mask: a filled width x height rectangle centred in the image.
        static Mat GetMask(Mat img, int height, int width)
        {
            Mat mask = Mat.Zeros(img.Height, img.Width, MatType.CV_8UC1);
            int x0 = mask.Width / 2 - width / 2;
            int y0 = mask.Height / 2 - height / 2;
            OpenCvSharp.CPlusPlus.Rect rect = new OpenCvSharp.CPlusPlus.Rect(x0, y0, width, height);
            Cv2.Rectangle(mask, rect, 255, -1);
            return mask;
        }
        // @iparam scanner: contours
        // @iparam size: pixel size of the big image
        // Returns the position of the track in pixel coordinates.
        static OpenCvSharp.CPlusPlus.Point GetMeanOfRawMicroTracks(CvContourScanner scanner, Size size)
        {
            List<rawmicrotrack> rms = new List<rawmicrotrack>();
            foreach (CvSeq<CvPoint> c in scanner)
            {
                CvMoments mom = new CvMoments(c, false);
                if (c.ElemSize < 2) continue;
                if (mom.M00 < 1.0) continue;
                double mx = mom.M10 / mom.M00;
                double my = mom.M01 / mom.M00;
                rawmicrotrack rm = new rawmicrotrack();
                rm.ax = 0;
                rm.ay = 0;
                rm.cx = (int)(mx - size.Width / 2);
                rm.cy = (int)(my - size.Height / 2);
                rm.pv = (int)(mom.M00);
                rms.Add(rm);
            }

            OpenCvSharp.CPlusPlus.Point trackpos = new OpenCvSharp.CPlusPlus.Point(0, 0);
            if (rms.Count > 0)
            {
                rawmicrotrack rm = new rawmicrotrack();
                double meancx = 0;
                double meancy = 0;
                double meanax = 0;
                double meanay = 0;
                double meanph = 0;
                double meanpv = 0;
                double sumpv = 0;
                for (int i = 0; i < rms.Count; i++)
                {
                    meanpv += rms[i].pv * rms[i].pv;
                    meancx += rms[i].cx * rms[i].pv;
                    meancy += rms[i].cy * rms[i].pv;
                    meanax += rms[i].ax * rms[i].pv;
                    meanay += rms[i].ay * rms[i].pv;
                    sumpv += rms[i].pv;
                }
                meancx /= sumpv; // centroid and slope averaged, weighted by brightness (pv)
                meancy /= sumpv;
                meanax /= sumpv;
                meanay /= sumpv;
                meanph /= sumpv;
                meanpv /= sumpv;
                trackpos = new OpenCvSharp.CPlusPlus.Point(
                    (int)(meancx) + 256,
                    (int)(meancy) + 220
                );
                // double anglex = (meanax * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
                // double angley = (meanay * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
                // Console.WriteLine(string.Format("{0:f4} {1:f4}", anglex, angley));
            }
            else
            {
                trackpos = new OpenCvSharp.CPlusPlus.Point(-1, -1);
            }
            return trackpos;
        }
        static OpenCvSharp.CPlusPlus.Point TrackDetection3(Mat img, int shiftx = 2, int shifty = 2, int shiftpitch = 4, int windowsize = 40, int phthresh = 5, double dx = 1.2, double dy = 1.2, bool debugflag = false)
        {
            bool debug = debugflag;

            // rotation parameters
            double phi = Math.Atan2(dy, dx);
            double angle = phi * (180 / Math.PI);
            double tant = Math.Sqrt(dx * dx + dy * dy);
            double pix = 0.26; // pixel-to-micron conversion factor

            // rotate the image so the expected track direction becomes horizontal
            Mat affineMask = Cv2.GetRotationMatrix2D(new OpenCvSharp.CPlusPlus.Point(img.Width / 2, img.Height / 2), -angle, 1);
            Cv2.WarpAffine(img, img, affineMask, img.Size());
            if (debug)
            {
                Cv2.ImShow("img", img);
                Cv2.WaitKey(0);
            }

            // build the masks (a narrow L1 window and a wider L2 window)
            int heightL1 = 15;
            int heightL2 = 30;
            int width = (int)(90 + tant * 180);
            width = (width > heightL1) ? width : heightL1;
            Mat maskL1 = GetMask(img, heightL1, width);
            Mat maskL2 = GetMask(img, heightL2, width);
            if (debug)
            {
                Cv2.ImShow("maskL1", maskL1);
                Cv2.ImShow("maskL2", maskL2);
                Cv2.WaitKey(0);
            }

            // cut the image with the mask and threshold the L1 area
            Mat imgL1 = img.Clone();
            Cv2.BitwiseAnd(imgL1, maskL1, imgL1);
            int pthresh2 = 200;
            Cv2.Threshold(imgL1, imgL1, pthresh2, 255, ThresholdType.ToZero);
            if (debug)
            {
                Cv2.ImShow("imgL1", imgL1);
                Cv2.WaitKey(0);
            }

            // inverse rotation for the L1 area
            Mat affineMask2 = Cv2.GetRotationMatrix2D(new OpenCvSharp.CPlusPlus.Point(imgL1.Width / 2, imgL1.Height / 2), angle, 1);
            Cv2.WarpAffine(imgL1, imgL1, affineMask2, imgL1.Size());
            if (debug)
            {
                Cv2.ImShow("imgL1", imgL1);
                Cv2.WaitKey(0);
            }

            // raw microtracks
            List<rawmicrotrack> rms = new List<rawmicrotrack>();

            // contour detection in the L1 area
            using (CvMemStorage storage = new CvMemStorage())
            using (CvContourScanner scanner = new CvContourScanner(imgL1.ToIplImage(), storage, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
            {
                foreach (CvSeq<CvPoint> c in scanner)
                {
                    CvMoments mom = new CvMoments(c, false);
                    if (c.ElemSize < 2) continue;
                    if (mom.M00 < 1.0) continue;
                    double mx = mom.M10 / mom.M00;
                    double my = mom.M01 / mom.M00;
                    rawmicrotrack rm = new rawmicrotrack();
                    rm.ax = 0;
                    rm.ay = 0;
                    rm.cx = (int)(mx - imgL1.Width / 2);
                    rm.cy = (int)(my - imgL1.Height / 2);
                    rm.pv = (int)(mom.M00);
                    rms.Add(rm);
                }
            }

            // if no candidate, spread the ROI:
            // cut the image with the mask and threshold the L2 area
            Mat imgL2 = img.Clone();
            Cv2.BitwiseAnd(imgL2, maskL2, imgL2);
            Cv2.Threshold(imgL2, imgL2, pthresh2, 255, ThresholdType.ToZero);
            if (debug)
            {
                Cv2.ImShow("imgL2", imgL2);
                Cv2.WaitKey(0);
            }

            // inverse rotation for the L2 area
            Mat affineMaskL2 = Cv2.GetRotationMatrix2D(new OpenCvSharp.CPlusPlus.Point(imgL2.Width / 2, imgL2.Height / 2), angle, 1);
            Cv2.WarpAffine(imgL2, imgL2, affineMaskL2, imgL2.Size());

            // contour detection in the L2 area;
            // if there is still no candidate, the track is not found
            using (CvMemStorage storageL2 = new CvMemStorage())
            using (CvContourScanner scannerL2 = new CvContourScanner(imgL2.ToIplImage(), storageL2, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple))
            {
                if (scannerL2.Count() != 0)
                {
                    foreach (CvSeq<CvPoint> c in scannerL2)
                    {
                        CvMoments mom = new CvMoments(c, false);
                        if (c.ElemSize < 2) continue;
                        if (mom.M00 < 1.0) continue;
                        double mx = mom.M10 / mom.M00;
                        double my = mom.M01 / mom.M00;
                        rawmicrotrack rm = new rawmicrotrack();
                        rm.ax = 0;
                        rm.ay = 0;
                        rm.cx = (int)(mx - imgL2.Width / 2);
                        rm.cy = (int)(my - imgL2.Height / 2);
                        rm.pv = (int)(mom.M00);
                        rms.Add(rm);
                    }
                }
                else
                {
                    // not detected
                    return new OpenCvSharp.CPlusPlus.Point(-1, -1);
                }
            } // using L2 contour

            // average of the raw microtracks, weighted by brightness (pv)
            OpenCvSharp.CPlusPlus.Point trackpos = new OpenCvSharp.CPlusPlus.Point(0, 0);
            if (rms.Count > 0)
            {
                rawmicrotrack rm = new rawmicrotrack();
                double meancx = 0;
                double meancy = 0;
                double meanax = 0;
                double meanay = 0;
                double meanph = 0;
                double meanpv = 0;
                double sumpv = 0;
                for (int i = 0; i < rms.Count; i++)
                {
                    meanpv += rms[i].pv * rms[i].pv;
                    meancx += rms[i].cx * rms[i].pv;
                    meancy += rms[i].cy * rms[i].pv;
                    meanax += rms[i].ax * rms[i].pv;
                    meanay += rms[i].ay * rms[i].pv;
                    sumpv += rms[i].pv;
                }
                meancx /= sumpv; // weighted average
                meancy /= sumpv;
                meanax /= sumpv;
                meanay /= sumpv;
                meanph /= sumpv;
                meanpv /= sumpv;
                trackpos = new OpenCvSharp.CPlusPlus.Point(
                    (int)(meancx) + 256,
                    (int)(meancy) + 220
                );
                // double anglex = (meanax * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
                // double angley = (meanay * shiftpitch * 0.267) / (3.0 * 7.0 * 2.2);
                // Console.WriteLine(string.Format("{0:f4} {1:f4}", anglex, angley));
                return trackpos;
            }
            else
            {
                return trackpos;
            }
        }
        static void Main(string[] args)
        {
            Mat img = Cv2.ImRead("superimpose.png", LoadMode.GrayScale);

            // optional mask preview, disabled by default
            bool flag = false;
            if (flag)
            {
                Mat mask = GetMask(img, 10, 20);
                Cv2.ImShow("superimpose.png", img);
                Cv2.ImShow("mask", mask);
                Cv2.ImWrite("mask.png", mask);
                Cv2.WaitKey(0);
            }

            OpenCvSharp.CPlusPlus.Point p = TrackDetection3(img);
            Console.WriteLine(p);
        }
    }
}
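
For comparison with the Python files in this gist, here is a rough Python/OpenCV sketch of the core idea in TrackDetection3, not a line-by-line port of the C# above: rotate the image so the expected track direction (dx, dy) is horizontal, keep a centred rectangular window, threshold, and take the intensity-weighted centroid of what remains.

# Rough sketch of the TrackDetection3 idea in Python/OpenCV (assumptions: single-channel
# uint8 input; the centroid is returned in plain image coordinates, not with the
# +256/+220 offsets used in the C# code).
import math
import cv2
import numpy as np

def track_position(img, dx=1.2, dy=1.2, win_h=15, thresh=200):
    h, w = img.shape
    angle = math.degrees(math.atan2(dy, dx))

    # rotate so the (dx, dy) direction becomes horizontal
    rot = cv2.getRotationMatrix2D((w // 2, h // 2), -angle, 1)
    rotated = cv2.warpAffine(img, rot, (w, h))

    # centred rectangular window, widened with the track slope as in the C# code
    win_w = int(90 + math.hypot(dx, dy) * 180)
    mask = np.zeros_like(rotated)
    x0, y0 = w // 2 - win_w // 2, h // 2 - win_h // 2
    cv2.rectangle(mask, (x0, y0), (x0 + win_w, y0 + win_h), 255, -1)

    # keep only bright pixels inside the window
    roi = cv2.bitwise_and(rotated, mask)
    _, roi = cv2.threshold(roi, thresh, 255, cv2.THRESH_TOZERO)

    # rotate back so the centroid is reported in the original frame
    inv = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1)
    roi = cv2.warpAffine(roi, inv, (w, h))

    m = cv2.moments(roi)
    if m["m00"] < 1.0:
        return (-1, -1)  # nothing above threshold: not detected
    return (int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"]))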
# -*- coding: utf-8 -*-
"""
Created on Jul. 26
@author: jyoshida-sci
"""
import cv2
import glob
import os
import numpy as np
import itertools

class SuperImposer:
    def __init__(self):
        self.imgs = []
        self.img2s = []
        self.h = 1000
        self.w = 1000

    def AddImg(self, img):
        self.imgs.append(img)

    def MakeImg(self):
        self.img2s = []
        for i, p in enumerate(self.imgs):
            pc = self.dog(p)  # difference-of-Gaussians copy of the input
            pc = self.contrast(pc)
            self.img2s.append(pc)

        bigimg_dog = np.zeros((self.h, self.w), np.uint8)
        bigimg_org = np.ones((self.h, self.w), np.uint8)*255
        #for i,p in enumerate(self.imgs):
        #    cv2.imshow('1', self.imgs[i])
        #    cv2.imshow('2', self.img2s[i])
        #    cv2.waitKey(0)

        sumdx = 0
        sumdy = 0
        for i, p in enumerate(self.img2s):
            dx = 0
            dy = 0
            val = 0.0
            if i != 0:
                dx, dy, val = self.selfTemplateMatch(self.img2s[i-1], p)
                #cv2.imshow('bigimg2', self.img2s[i-1])
                #cv2.imshow('imposed', p)
                #cv2.waitKey(0)
            sumdx += dx
            sumdy += dy
            #print i, dx, dy, sumdx, sumdy, val
            shiftimg_dog = self.SpreadCanvas(p, self.w, self.h, sumdx, sumdy, False)
            shiftimg_org = self.SpreadCanvas(self.imgs[i], self.w, self.h, sumdx, sumdy, True)
            np.maximum(bigimg_dog, shiftimg_dog, bigimg_dog)
            np.minimum(bigimg_org, shiftimg_org, bigimg_org)

        cv2.imshow('bigimg_dog', bigimg_dog)
        cv2.imshow('bigimg_org', bigimg_org)
        cv2.waitKey(0)
        return bigimg_dog, bigimg_org

    def dog(self, img):
        img = cv2.GaussianBlur(img, (3, 3), 0)
        gau = cv2.GaussianBlur(img, (31, 31), 0)
        sub = cv2.subtract(gau, img)
        return sub

    def contrast(self, img):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(img)
        if maxval - minval == 0:
            return img
        enh = np.zeros_like(img)
        cv2.convertScaleAbs(img, enh, 255/(maxval-minval), -255*minval/(maxval-minval))
        return enh

    def SpreadCanvas(self, img, w, h, dx, dy, isOriginal):
        if isOriginal:
            big_img = np.ones((h, w), np.uint8)*255  # white canvas for original images
        else:
            big_img = np.zeros((h, w), np.uint8)     # black canvas for DoG images
        i_h, i_w = img.shape
        sx = w/2 - i_w/2 + dx
        sy = h/2 - i_h/2 + dy
        big_img[sy:sy+i_h, sx:sx+i_w] = img  # paste (not accumulate) to avoid uint8 wrap-around
        return big_img

    def selfTemplateMatch(self, img1, img2):
        mar = 50
        #print img2.shape
        template = img2[mar:-mar, mar:-mar]
        res = cv2.matchTemplate(img1, template, cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        top_left = max_loc
        return top_left[0]-mar, top_left[1]-mar, max_val
#if __name__ == '__main__':
'''
files = glob.glob("*.png")
files.sort(key=os.path.getmtime)
mysi = SuperImposer()
for i, filename in enumerate(files):
    #print filename
    img = cv2.imread(filename, cv2.CV_LOAD_IMAGE_GRAYSCALE)
    mysi.AddImg(img)
    if i == 0:
        canvas = img.copy()
    else:
        #np.maximum(canvas, img, canvas)
        np.minimum(canvas, img, canvas)
# MakeImg() returns (bigimg_dog, bigimg_org)
dst_dog, dst_org = mysi.MakeImg()
cv2.imwrite('s_{0}.png'.format(filename), dst_dog)
'''
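
For reference, the linear stretch performed by contrast() maps the darkest pixel to 0 and the brightest to 255. A small self-contained illustration with made-up example values:

# Numeric illustration of the contrast() stretch above (example values only):
# convertScaleAbs(img, alpha, beta) with alpha = 255/(max-min), beta = -255*min/(max-min)
# maps min -> 0 and max -> 255 linearly.
import numpy as np

img = np.array([[10, 20],
                [30, 40]], dtype=np.uint8)
minval, maxval = float(img.min()), float(img.max())
alpha = 255.0 / (maxval - minval)           # 8.5
beta = -255.0 * minval / (maxval - minval)  # -85.0
stretched = np.clip(img * alpha + beta, 0, 255).round().astype(np.uint8)
print(stretched)  # [[  0  85]
                  #  [170 255]]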