Face-region extraction, cropping, and variation generation as preprocessing for deep learning
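# Usage sketch (the script filename face_preprocess.py is an assumption, not part of the gist):
#   python face_preprocess.py -p <imagefile_path>
# Resized copies are written to <imagefile_path>/_resize, detection previews with
# boxes to <imagefile_path>/_addbox, and the 128x128 cropped face variants
# (original, flipped, brightness-normalized, gamma-corrected, high-contrast)
# to <imagefile_path>/_trimming.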
import os
import glob
import argparse
import cv2
from PIL import Image
from PIL import ImageEnhance
import numpy as np
IMAGE_HEIGHT = 800
CASCADE_PATH = "./haarcascades/haarcascade_frontalface_alt.xml"
cascade = cv2.CascadeClassifier(CASCADE_PATH)
color = (255, 255, 255)

def detectFace(image):
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    facerect = cascade.detectMultiScale(image_gray, scaleFactor=1.07, minNeighbors=9, minSize=(10, 10))
    return facerect

def pre_resize(before, after, height=IMAGE_HEIGHT, filename="", antialias_enable=True):
    """
    Resize an image so its height matches the predefined IMAGE_HEIGHT,
    preserving the aspect ratio, regardless of the original size.
    """
    img = Image.open(before, 'r')
    before_x, before_y = img.size[0], img.size[1]
    x = int(round(float(height / float(before_y) * float(before_x))))
    y = height
    resize_img = img
    if antialias_enable:
        resize_img.thumbnail((x, y), Image.ANTIALIAS)
    else:
        resize_img = resize_img.resize((x, y))
    resize_img.save(after, 'jpeg', quality=100)
    print("RESIZED: %s[%sx%s] --> %sx%s" % (filename, before_x, before_y, x, y))

def resize(image):
    return cv2.resize(image, (128, 128))

def rotate(image, r):
    h, w, ch = image.shape  # array size of the image
    M = cv2.getRotationMatrix2D((w/2, h/2), r, 1)  # rotation matrix for rotating around the image center
    rotated = cv2.warpAffine(image, M, (w, h))
    return rotated

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='clip face images from image files and do data augmentation.')
    parser.add_argument('-p', required=True, help='set files path.', metavar='imagefile_path')
    args = parser.parse_args()
    # directory for the resized images
    resize_dir = args.p + "/_resize"
    if not os.path.exists(resize_dir):
        os.makedirs(resize_dir)
    # directory for images with boxes drawn around the detected faces
    addbox_dir = args.p + "/_addbox"
    if not os.path.exists(addbox_dir):
        os.makedirs(addbox_dir)
    # directory for the cropped face images
    trimming_dir = args.p + "/_trimming"
    if not os.path.exists(trimming_dir):
        os.makedirs(trimming_dir)
    face_cnt = 0
    # build a lookup table for gamma correction: out = 255 * (in / 255) ** (1 / gamma)
    gamma = 1.8
    lookUpTable = np.zeros((256, 1), dtype='uint8')
    for i in range(256):
        lookUpTable[i][0] = 255 * pow(float(i) / 255, 1.0 / gamma)
    # build a high-contrast lookup table: stretch [min_table, max_table] to [0, 255]
    min_table = 50
    max_table = 205
    diff_table = max_table - min_table
    LUT_HC = np.arange(256, dtype='uint8')
    for i in range(0, min_table):
        LUT_HC[i] = 0
    for i in range(min_table, max_table):
        LUT_HC[i] = 255 * (i - min_table) / diff_table
    for i in range(max_table, 255):
        LUT_HC[i] = 255
    # collect the jpg files
    files = glob.glob("%s/*.jp*g" % (args.p))
    for file_name in files:
        before_path = file_name
        filename = os.path.basename(file_name)
        after_path = '%s/%s' % (resize_dir, filename)
        pre_resize(before_path, after_path, filename=file_name)
    resize_files = glob.glob(resize_dir + "/*.jpg")
    for file_name in resize_files:
        print("detect face on file:" + file_name)
        # load the image
        image = cv2.imread(file_name)
        if image is None:
            # failed to read the image
            print("image is None")
            continue
        # rotate in 4-degree steps over the range -12 to +12 degrees
        for r in range(-12, 13, 4):
            # rotate the original image each time so the angles do not accumulate
            rotated = rotate(image, r)
            # detect faces
            facerect_list = detectFace(rotated)
            if len(facerect_list) == 0:
                continue
            basename = os.path.basename(file_name)
            # save an image with boxes around the detections, to check how well detection works;
            # draw on a copy so the boxes do not leak into the crops below
            boxed = rotated.copy()
            for rect in facerect_list:
                cv2.rectangle(boxed, tuple(rect[0:2]), tuple(rect[0:2] + rect[2:4]), color, thickness=2)
            cv2.imwrite(addbox_dir + "/" + basename, boxed)
            # crop the face regions
            for facerect in facerect_list:
                # cut out the face region
                croped = rotated[facerect[1]:facerect[1]+facerect[3], facerect[0]:facerect[0]+facerect[2]]
                # write out the crop
                cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(croped))
                face_cnt += 1
                # also write out the horizontally flipped crop
                fliped = np.fliplr(croped)
                cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(fliped))
                face_cnt += 1
                # brightness normalization (compute in float to avoid uint8 wrap-around, then clip)
                meaned = (croped.astype(np.float64) - np.mean(croped)) / np.std(croped) * 16 + 128
                meaned = np.clip(meaned, 0, 255).astype(np.uint8)
                cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(meaned))
                face_cnt += 1
                # gamma correction
                #img_max = image.max()
                #gamma_up = img_max * (croped / img_max) ** (1 / 2.0)
                #cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(gamma_up))
                #face_cnt += 1
                # gamma correction via the lookup table
                gamma_lut = cv2.LUT(croped, lookUpTable)
                cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(gamma_lut))
                face_cnt += 1
                # change the contrast
                high_contrast = cv2.LUT(croped, LUT_HC)
                cv2.imwrite(trimming_dir + "/" + str(face_cnt) + ".jpg", resize(high_contrast))
                face_cnt += 1
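
The crops in `_trimming` are the files this preprocessing produces for training. A minimal sketch of loading them back into a single array, assuming the script was run with `-p ./images` (an example path, not part of the gist), using only calls the script already relies on:

# Sketch: load the 128x128 face crops written to _trimming into one array
# (./images is an assumed example input path).
import glob
import cv2
import numpy as np

crop_paths = sorted(glob.glob("./images/_trimming/*.jpg"))
crops = [cv2.imread(p) for p in crop_paths]
dataset = np.stack([c for c in crops if c is not None])  # shape: (N, 128, 128, 3), BGR uint8
print(dataset.shape)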