Neural-Style with the matting laplacian feature from deep-photo-styletransfer. See the comments in neural_style_laplacian.lua for setting up and using the new feature.
extern "C" { | |
#include "lua.h" | |
#include "lualib.h" | |
#include "lauxlib.h" | |
} | |
#include "luaT.h" | |
#include "THC.h" | |
#include <stdio.h> | |
#include <assert.h> | |
#include <math_constants.h> | |
#include <math_functions.h> | |
#include <stdint.h> | |
#include <unistd.h> | |
#define TB 256 | |
#define EPS 1e-4 | |
THCState* getCutorchState(lua_State* L) | |
{ | |
lua_getglobal(L, "cutorch"); | |
lua_getfield(L, -1, "getState"); | |
lua_call(L, 0, 1); | |
THCState *state = (THCState*) lua_touserdata(L, -1); | |
lua_pop(L, 2); | |
return state; | |
} | |
void checkCudaError(lua_State *L) { | |
cudaError_t status = cudaPeekAtLastError(); | |
if (status != cudaSuccess) { | |
luaL_error(L, cudaGetErrorString(status)); | |
} | |
} | |
THCudaTensor *new_tensor_like(THCState *state, THCudaTensor *x) | |
{ | |
THCudaTensor *y = THCudaTensor_new(state); | |
THCudaTensor_resizeAs(state, y, x); | |
return y; | |
} | |
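
/*
 * matting_laplacian_kernel: for each pixel and colour channel, accumulate the
 * gradient of the photorealism regularizer x^T L x, i.e. grad = 2 * L * x.
 * L is the symmetric matting Laplacian, supplied as 1-based (row, col, val)
 * triplets sorted by row (the CSV written by laplacian.py below). The triplet
 * pixel ids are column-major (id = x*h + y) while the image tensors are
 * row-major (id = y*w + x), hence the index conversions below.
 */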
__global__ void matting_laplacian_kernel(
  float *input, float *grad, int h, int w,
  int *CSR_rowIdx, int *CSR_colIdx, float *CSR_val,
  int N
)
{
  int size = h * w;
  int _id = blockIdx.x * blockDim.x + threadIdx.x;
  if (_id < size) {
    int x = _id % w, y = _id / w;
    int id = x * h + y;

    // Because the matting Laplacian L is symmetric, summing over the row is sufficient.
    // 1.1 Binary search for any triplet belonging to this row
    int start = 0;
    int end = N - 1;
    int mid = (start + end) / 2;
    int index = -1;
    while (start <= end) {
      int rowIdx = (CSR_rowIdx[mid]) - 1;
      if (rowIdx == id) {
        index = mid; break;
      }
      if (rowIdx > id) {
        end = mid - 1;
        mid = (start + end) / 2;
      } else {
        start = mid + 1;
        mid = (start + end) / 2;
      }
    }

    if (index != -1) {
      // 1.2 Expand to the complete range of triplets with this row index
      int index_s = index, index_e = index;
      while (index_s >= 0 && ((CSR_rowIdx[index_s] - 1) == id))
        index_s--;
      while (index_e < N && ((CSR_rowIdx[index_e] - 1) == id))
        index_e++;

      // 1.3 Sum this row: grad_c(id) += 2 * L(id, col) * input_c(col)
      for (int i = index_s + 1; i < index_e; i++) {
        //int rowIdx = CSR_rowIdx[i] - 1;
        int _colIdx = (CSR_colIdx[i]) - 1;
        float val = CSR_val[i];
        int _x = _colIdx / h, _y = _colIdx % h;
        int colIdx = _y * w + _x;

        grad[_id]          += 2 * val * input[colIdx];
        grad[_id + size]   += 2 * val * input[colIdx + size];
        grad[_id + 2*size] += 2 * val * input[colIdx + 2*size];
      }
    }
  }
  return;
}
// local grad = cuda_utils.matting_laplacian(input, h, w, CSR_rowIdx, CSR_colIdx, CSR_val, N)
int matting_laplacian(lua_State *L) {
  THCState *state = getCutorchState(L);
  THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
  int h = luaL_checknumber(L, 2);
  int w = luaL_checknumber(L, 3);
  THCudaIntTensor *CSR_rowIdx = (THCudaIntTensor*)luaT_checkudata(L, 4, "torch.CudaIntTensor");
  THCudaIntTensor *CSR_colIdx = (THCudaIntTensor*)luaT_checkudata(L, 5, "torch.CudaIntTensor");
  THCudaTensor *CSR_val = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor");
  int N = luaL_checknumber(L, 7);

  THCudaTensor *grad = new_tensor_like(state, input);
  THCudaTensor_zero(state, grad);

  matting_laplacian_kernel<<<(h*w-1)/TB+1, TB>>>(
    THCudaTensor_data(state, input),
    THCudaTensor_data(state, grad),
    h, w,
    THCudaIntTensor_data(state, CSR_rowIdx),
    THCudaIntTensor_data(state, CSR_colIdx),
    THCudaTensor_data(state, CSR_val),
    N
  );
  checkCudaError(L);

  luaT_pushudata(L, grad, "torch.CudaTensor");
  return 1;
}

__device__ bool InverseMat4x4(double m_in[4][4], double inv_out[4][4]) {
  double m[16], inv[16];
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      m[i * 4 + j] = m_in[i][j];
    }
  }

  inv[0]  =  m[5]*m[10]*m[15] - m[5]*m[11]*m[14] - m[9]*m[6]*m[15] + m[9]*m[7]*m[14] + m[13]*m[6]*m[11] - m[13]*m[7]*m[10];
  inv[4]  = -m[4]*m[10]*m[15] + m[4]*m[11]*m[14] + m[8]*m[6]*m[15] - m[8]*m[7]*m[14] - m[12]*m[6]*m[11] + m[12]*m[7]*m[10];
  inv[8]  =  m[4]*m[9]*m[15]  - m[4]*m[11]*m[13] - m[8]*m[5]*m[15] + m[8]*m[7]*m[13] + m[12]*m[5]*m[11] - m[12]*m[7]*m[9];
  inv[12] = -m[4]*m[9]*m[14]  + m[4]*m[10]*m[13] + m[8]*m[5]*m[14] - m[8]*m[6]*m[13] - m[12]*m[5]*m[10] + m[12]*m[6]*m[9];
  inv[1]  = -m[1]*m[10]*m[15] + m[1]*m[11]*m[14] + m[9]*m[2]*m[15] - m[9]*m[3]*m[14] - m[13]*m[2]*m[11] + m[13]*m[3]*m[10];
  inv[5]  =  m[0]*m[10]*m[15] - m[0]*m[11]*m[14] - m[8]*m[2]*m[15] + m[8]*m[3]*m[14] + m[12]*m[2]*m[11] - m[12]*m[3]*m[10];
  inv[9]  = -m[0]*m[9]*m[15]  + m[0]*m[11]*m[13] + m[8]*m[1]*m[15] - m[8]*m[3]*m[13] - m[12]*m[1]*m[11] + m[12]*m[3]*m[9];
  inv[13] =  m[0]*m[9]*m[14]  - m[0]*m[10]*m[13] - m[8]*m[1]*m[14] + m[8]*m[2]*m[13] + m[12]*m[1]*m[10] - m[12]*m[2]*m[9];
  inv[2]  =  m[1]*m[6]*m[15]  - m[1]*m[7]*m[14]  - m[5]*m[2]*m[15] + m[5]*m[3]*m[14] + m[13]*m[2]*m[7]  - m[13]*m[3]*m[6];
  inv[6]  = -m[0]*m[6]*m[15]  + m[0]*m[7]*m[14]  + m[4]*m[2]*m[15] - m[4]*m[3]*m[14] - m[12]*m[2]*m[7]  + m[12]*m[3]*m[6];
  inv[10] =  m[0]*m[5]*m[15]  - m[0]*m[7]*m[13]  - m[4]*m[1]*m[15] + m[4]*m[3]*m[13] + m[12]*m[1]*m[7]  - m[12]*m[3]*m[5];
  inv[14] = -m[0]*m[5]*m[14]  + m[0]*m[6]*m[13]  + m[4]*m[1]*m[14] - m[4]*m[2]*m[13] - m[12]*m[1]*m[6]  + m[12]*m[2]*m[5];
  inv[3]  = -m[1]*m[6]*m[11]  + m[1]*m[7]*m[10]  + m[5]*m[2]*m[11] - m[5]*m[3]*m[10] - m[9]*m[2]*m[7]   + m[9]*m[3]*m[6];
  inv[7]  =  m[0]*m[6]*m[11]  - m[0]*m[7]*m[10]  - m[4]*m[2]*m[11] + m[4]*m[3]*m[10] + m[8]*m[2]*m[7]   - m[8]*m[3]*m[6];
  inv[11] = -m[0]*m[5]*m[11]  + m[0]*m[7]*m[9]   + m[4]*m[1]*m[11] - m[4]*m[3]*m[9]  - m[8]*m[1]*m[7]   + m[8]*m[3]*m[5];
  inv[15] =  m[0]*m[5]*m[10]  - m[0]*m[6]*m[9]   - m[4]*m[1]*m[10] + m[4]*m[2]*m[9]  + m[8]*m[1]*m[6]   - m[8]*m[2]*m[5];

  double det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12];
  if (fabs(det) < 1e-9) {
    return false;
  }
  det = 1.0 / det;

  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      inv_out[i][j] = inv[i * 4 + j] * det;
    }
  }
  return true;
}
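
/*
 * best_local_affine_kernel: for every pixel, fit a 3x4 affine map A from the
 * input's colour channels (plus a constant term) to the output's colours over
 * a (2*kernel_radius+1)^2 window, by accumulating the normal equations Mt_M
 * and Mt_S and solving A = (Mt_M)^-1 * Mt_S via InverseMat4x4. A small ridge
 * (1e-3 on the first three diagonal entries) keeps Mt_M well conditioned;
 * note that the epsilon argument is not actually used inside this kernel.
 */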
__global__ void best_local_affine_kernel(
  float *output, float *input, float *affine_model,
  int h, int w, float epsilon, int kernel_radius
)
{
  int size = h * w;
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < size) {
    int x = id % w, y = id / w;

    double Mt_M[4][4] = {};    // 4x4
    double invMt_M[4][4] = {};
    double Mt_S[3][4] = {};    // RGB -> 1x4
    double A[3][4] = {};
    for (int i = 0; i < 4; i++)
      for (int j = 0; j < 4; j++) {
        Mt_M[i][j] = 0, invMt_M[i][j] = 0;
        if (i != 3) {
          Mt_S[i][j] = 0, A[i][j] = 0;
          if (i == j)
            Mt_M[i][j] = 1e-3;
        }
      }

    for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
      for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
        int xx = x + dx, yy = y + dy;
        int id2 = yy * w + xx;

        if (0 <= xx && xx < w && 0 <= yy && yy < h) {
          Mt_M[0][0] += input[id2 + 2*size] * input[id2 + 2*size];
          Mt_M[0][1] += input[id2 + 2*size] * input[id2 + size];
          Mt_M[0][2] += input[id2 + 2*size] * input[id2];
          Mt_M[0][3] += input[id2 + 2*size];

          Mt_M[1][0] += input[id2 + size] * input[id2 + 2*size];
          Mt_M[1][1] += input[id2 + size] * input[id2 + size];
          Mt_M[1][2] += input[id2 + size] * input[id2];
          Mt_M[1][3] += input[id2 + size];

          Mt_M[2][0] += input[id2] * input[id2 + 2*size];
          Mt_M[2][1] += input[id2] * input[id2 + size];
          Mt_M[2][2] += input[id2] * input[id2];
          Mt_M[2][3] += input[id2];

          Mt_M[3][0] += input[id2 + 2*size];
          Mt_M[3][1] += input[id2 + size];
          Mt_M[3][2] += input[id2];
          Mt_M[3][3] += 1;

          Mt_S[0][0] += input[id2 + 2*size] * output[id2 + 2*size];
          Mt_S[0][1] += input[id2 + size] * output[id2 + 2*size];
          Mt_S[0][2] += input[id2] * output[id2 + 2*size];
          Mt_S[0][3] += output[id2 + 2*size];

          Mt_S[1][0] += input[id2 + 2*size] * output[id2 + size];
          Mt_S[1][1] += input[id2 + size] * output[id2 + size];
          Mt_S[1][2] += input[id2] * output[id2 + size];
          Mt_S[1][3] += output[id2 + size];

          Mt_S[2][0] += input[id2 + 2*size] * output[id2];
          Mt_S[2][1] += input[id2 + size] * output[id2];
          Mt_S[2][2] += input[id2] * output[id2];
          Mt_S[2][3] += output[id2];
        }
      }
    }

    bool success = InverseMat4x4(Mt_M, invMt_M);

    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 4; j++) {
        for (int k = 0; k < 4; k++) {
          A[i][j] += invMt_M[j][k] * Mt_S[i][k];
        }
      }
    }

    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 4; j++) {
        int affine_id = i * 4 + j;
        affine_model[12 * id + affine_id] = A[i][j];
      }
    }
  }
  return;
}
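
/*
 * bilateral_smooth_kernel: joint bilateral filter over the per-pixel affine
 * coefficients. Each neighbour's weight combines spatial distance (sigma1)
 * with colour difference in the guide image (sigma2), smoothing the affine
 * model without crossing the guide's edges.
 */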
__global__ void bilateral_smooth_kernel(
  float *affine_model, float *filtered_affine_model, float *guide,
  int h, int w, int kernel_radius, float sigma1, float sigma2
)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  int size = h * w;
  if (id < size) {
    int x = id % w;
    int y = id / w;

    double sum_affine[12] = {};
    double sum_weight = 0;
    for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
      for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
        int yy = y + dy, xx = x + dx;
        int id2 = yy * w + xx;
        if (0 <= xx && xx < w && 0 <= yy && yy < h) {
          float color_diff1 = guide[yy*w + xx] - guide[y*w + x];
          float color_diff2 = guide[yy*w + xx + size] - guide[y*w + x + size];
          float color_diff3 = guide[yy*w + xx + 2*size] - guide[y*w + x + 2*size];
          float color_diff_sqr =
            (color_diff1*color_diff1 + color_diff2*color_diff2 + color_diff3*color_diff3) / 3;

          float v1 = exp(-(dx * dx + dy * dy) / (2 * sigma1 * sigma1));
          float v2 = exp(-(color_diff_sqr) / (2 * sigma2 * sigma2));
          float weight = v1 * v2;

          for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 4; j++) {
              int affine_id = i * 4 + j;
              sum_affine[affine_id] += weight * affine_model[id2*12 + affine_id];
            }
          }
          sum_weight += weight;
        }
      }
    }

    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 4; j++) {
        int affine_id = i * 4 + j;
        filtered_affine_model[id*12 + affine_id] = sum_affine[affine_id] / sum_weight;
      }
    }
  }
  return;
}
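
/*
 * reconstruction_best_kernel: apply each pixel's smoothed 3x4 affine model to
 * its input colours (plus the constant term) to produce the final output.
 */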
__global__ void reconstruction_best_kernel(
  float *input, float *filtered_affine_model, float *filtered_best_output,
  int h, int w
)
{
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  int size = h * w;
  if (id < size) {
    double out1 =
      input[id + 2*size] * filtered_affine_model[id*12 + 0] +  // A[0][0]
      input[id + size]   * filtered_affine_model[id*12 + 1] +  // A[0][1]
      input[id]          * filtered_affine_model[id*12 + 2] +  // A[0][2]
                           filtered_affine_model[id*12 + 3];   // A[0][3]
    double out2 =
      input[id + 2*size] * filtered_affine_model[id*12 + 4] +  // A[1][0]
      input[id + size]   * filtered_affine_model[id*12 + 5] +  // A[1][1]
      input[id]          * filtered_affine_model[id*12 + 6] +  // A[1][2]
                           filtered_affine_model[id*12 + 7];   // A[1][3]
    double out3 =
      input[id + 2*size] * filtered_affine_model[id*12 + 8] +  // A[2][0]
      input[id + size]   * filtered_affine_model[id*12 + 9] +  // A[2][1]
      input[id]          * filtered_affine_model[id*12 + 10] + // A[2][2]
                           filtered_affine_model[id*12 + 11];  // A[2][3]

    filtered_best_output[id]          = out1;
    filtered_best_output[id + size]   = out2;
    filtered_best_output[id + 2*size] = out3;
  }
  return;
}

// local best01 = cuda_utils.smooth_local_affine(output01, input01, epsilon, patch, h, w, filter_radius, sigma1, sigma2)
int smooth_local_affine(lua_State *L) {
  THCState *state = getCutorchState(L);
  THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
  THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
  float epsilon = luaL_checknumber(L, 3);
  int patch = luaL_checknumber(L, 4);
  int h = luaL_checknumber(L, 5);
  int w = luaL_checknumber(L, 6);
  int f_r = luaL_checknumber(L, 7);
  float sigma1 = luaL_checknumber(L, 8);
  float sigma2 = luaL_checknumber(L, 9);

  THCudaTensor *filtered_best_output = new_tensor_like(state, input);
  THCudaTensor_zero(state, filtered_best_output);

  THCudaTensor *affine_model = THCudaTensor_new(state);
  THCudaTensor_resize2d(state, affine_model, h*w, 12);
  THCudaTensor_zero(state, affine_model);

  THCudaTensor *filtered_affine_model = THCudaTensor_new(state);
  THCudaTensor_resize2d(state, filtered_affine_model, h*w, 12);
  THCudaTensor_zero(state, filtered_affine_model);

  int radius = (patch - 1) / 2;

  best_local_affine_kernel<<<(h*w)/TB+1, TB>>>(
    THCudaTensor_data(state, output),
    THCudaTensor_data(state, input),
    THCudaTensor_data(state, affine_model),
    h, w, epsilon, radius
  );
  checkCudaError(L);

  bilateral_smooth_kernel<<<(h*w)/TB+1, TB>>>(
    THCudaTensor_data(state, affine_model),
    THCudaTensor_data(state, filtered_affine_model),
    THCudaTensor_data(state, input),
    h, w, f_r, sigma1, sigma2
  );
  checkCudaError(L);
  THCudaTensor_free(state, affine_model);

  reconstruction_best_kernel<<<(h*w)/TB+1, TB>>>(
    THCudaTensor_data(state, input),
    THCudaTensor_data(state, filtered_affine_model),
    THCudaTensor_data(state, filtered_best_output),
    h, w
  );
  checkCudaError(L);
  THCudaTensor_free(state, filtered_affine_model);

  luaT_pushudata(L, filtered_best_output, "torch.CudaTensor");
  return 1;
}

static const struct luaL_Reg funcs[] = {
  {"matting_laplacian",   matting_laplacian},
  {"smooth_local_affine", smooth_local_affine},
  {NULL, NULL}
};

extern "C" int luaopen_libcuda_utils(lua_State *L) {
  luaL_openlib(L, "cuda_utils", funcs, 0);
  return 1;
}
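
For reference, here is a minimal NumPy sketch (not part of the gist; the function name and dense layout are illustrative) of the gradient that matting_laplacian_kernel computes on the GPU:

import numpy as np

def matting_laplacian_grad(img, rows, cols, vals, h, w):
    """img: (3, h, w) array; rows/cols: 1-based column-major pixel ids."""
    size = h * w
    L = np.zeros((size, size))
    L[rows - 1, cols - 1] = vals
    # Permutation from column-major id (x*h + y) to row-major id (y*w + x).
    cm = np.arange(size)
    perm = (cm % h) * w + (cm // h)
    x = img.reshape(3, size)                # row-major flatten: id = y*w + x
    grad_cm = 2.0 * (L @ x[:, perm].T).T    # gradient of x^T L x, per channel
    out = np.empty_like(x)
    out[:, perm] = grad_cm                  # scatter back to row-major layout
    return out.reshape(3, h, w)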
# Standalone version of the Matting Laplacian code from here: https://github.com/martinbenson/deep-photo-styletransfer
# Usage: python3 laplacian.py -in_dir <directory> -lap_dir <directory> -width <value>
# Install the dependencies with: pip3 install numpy scipy Pillow
# (Note: the script relies on scipy.misc.imresize/imsave and scipy.ndimage.imread, which require an older SciPy release.)
# This script is intended for use with artistic style transfer neural networks and with Deep Photo Style Transfer.
# Please note that the chosen -width value must be the same as the -image_size value in neural_style_laplacian.lua.
# Input images currently must be PNG files in order to be detected by the script.
import argparse
import glob
import os
import shutil
import multiprocessing
import math
import subprocess

import scipy.misc as spm
import scipy.ndimage as spi
import scipy.sparse as sps
import numpy as np
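
# getlaplacian1 builds the matting Laplacian of Levin, Lischinski and Weiss
# ("A Closed-Form Solution to Natural Image Matting"). For each local window
# w_k it accumulates the affinity
#   W_ij = (1/|w_k|) * (1 + (I_i - mu_k)^T (Sigma_k + eps/|w_k| * Id)^(-1) (I_j - mu_k))
# over all windows containing pixels i and j, then returns L = D - W, where D
# is the diagonal matrix of row sums.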
def getlaplacian1(i_arr: np.ndarray, consts: np.ndarray, epsilon: float = 0.0000001, win_size: int = 1):
    neb_size = (win_size * 2 + 1) ** 2
    h, w, c = i_arr.shape
    img_size = w * h
    consts = spi.morphology.grey_erosion(consts, footprint=np.ones(shape=(win_size * 2 + 1, win_size * 2 + 1)))

    indsM = np.reshape(np.array(range(img_size)), newshape=(h, w), order='F')

    tlen = int((-consts[win_size:-win_size, win_size:-win_size] + 1).sum() * (neb_size ** 2))

    row_inds = np.zeros(tlen)
    col_inds = np.zeros(tlen)
    vals = np.zeros(tlen)

    l = 0
    for j in range(win_size, w - win_size):
        for i in range(win_size, h - win_size):
            if consts[i, j]:
                continue

            win_inds = indsM[i - win_size:i + win_size + 1, j - win_size: j + win_size + 1]
            win_inds = win_inds.ravel(order='F')
            win_i = i_arr[i - win_size:i + win_size + 1, j - win_size: j + win_size + 1, :]
            win_i = win_i.reshape((neb_size, c), order='F')
            # 1 x c window mean (the original reshaped to win_size * 2 + 1,
            # which only equals c for 3-channel images with win_size == 1)
            win_mu = np.mean(win_i, axis=0).reshape(1, c)
            win_var = np.linalg.inv(
                np.matmul(win_i.T, win_i) / neb_size - np.matmul(win_mu.T, win_mu)
                + epsilon / neb_size * np.identity(c))

            win_i2 = win_i - win_mu
            tvals = (1 + np.matmul(np.matmul(win_i2, win_var), win_i2.T)) / neb_size

            ind_mat = np.broadcast_to(win_inds, (neb_size, neb_size))
            row_inds[l: (neb_size ** 2 + l)] = ind_mat.ravel(order='C')
            col_inds[l: neb_size ** 2 + l] = ind_mat.ravel(order='F')
            vals[l: neb_size ** 2 + l] = tvals.ravel(order='F')
            l += neb_size ** 2

    vals = vals.ravel(order='F')
    row_inds = row_inds.ravel(order='F')
    col_inds = col_inds.ravel(order='F')
    a_sparse = sps.csr_matrix((vals, (row_inds, col_inds)), shape=(img_size, img_size))

    sum_a = a_sparse.sum(axis=1).T.tolist()[0]
    a_sparse = sps.diags([sum_a], [0], shape=(img_size, img_size)) - a_sparse

    return a_sparse
def im2double(im):
    min_val = np.min(im.ravel())
    max_val = np.max(im.ravel())
    return (im.astype('float') - min_val) / (max_val - min_val)


def reshape_img(in_img, l=512):
    in_h, in_w, _ = in_img.shape
    if in_h > in_w:
        h2 = l
        w2 = int(in_w * h2 / in_h)
    else:
        w2 = l
        h2 = int(in_h * w2 / in_w)
    return spm.imresize(in_img, (h2, w2))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-in_dir", "--in_directory", help="Path to input images", required=True)
    parser.add_argument("-lap_dir", "--laplacian_directory", help="Path to where the laplacians are saved", required=True)
    parser.add_argument("-width", "--width", help="Image width", default=512)
    args = parser.parse_args()
    width = int(args.width)

    if not os.path.exists("/tmp/deep_photo/"):
        os.makedirs("/tmp/deep_photo/")
    if not os.path.exists("/tmp/deep_photo/in"):
        os.makedirs("/tmp/deep_photo/in")
    if not os.path.exists(args.laplacian_directory):
        os.makedirs(args.laplacian_directory)

    files = []
    for f in glob.iglob(os.path.join(args.in_directory, '*.png')):
        files.append(f)

    good_images = []
    for f in files:
        image_name = os.path.basename(f)
        good_images.append(image_name)

    def process_image(image_name):
        filename = os.path.join(args.in_directory, image_name)
        lap_name = os.path.join(args.laplacian_directory,
                                image_name.replace(".png", "") + "_" + str(args.width) + ".csv")
        img = spi.imread(filename, mode="RGB")
        resized_img = reshape_img(img, width)
        spm.imsave("/tmp/deep_photo/in/" + image_name, resized_img)
        #if not os.path.exists(lap_name):
        print("Calculating matting laplacian for " + str(image_name) + "...")
        img = im2double(resized_img)
        h, w, c = img.shape
        csr = getlaplacian1(img, np.zeros(shape=(h, w)), 1e-7, 1)
        coo = csr.tocoo()
        zipped = zip(coo.row + 1, coo.col + 1, coo.data)
        with open(lap_name, 'w') as out_file:
            out_file.write(str(len(coo.data)) + "\n")
            for row, col, val in zipped:
                out_file.write("%d,%d,%.15f\n" % (row, col, val))

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(process_image, good_images)
    shutil.rmtree("/tmp/deep_photo/", ignore_errors=True)
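
For reference, a minimal Python sketch (not part of the gist; the function name is illustrative) that reads a generated CSV back, matching the loader in neural_style_laplacian.lua (first line: triplet count, then 1-based "row,col,value" lines):

import numpy as np

def load_laplacian_csv(path):
    with open(path) as f:
        n = int(f.readline())
        data = np.loadtxt(f, delimiter=',')
    assert data.shape == (n, 3)
    rows = data[:, 0].astype(np.int64)   # 1-based row ids, sorted by row
    cols = data[:, 1].astype(np.int64)   # 1-based column ids
    vals = data[:, 2]
    return rows, cols, vals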
PREFIX=/home/ubuntu/torch/install/
NVCC_PREFIX=/usr/local/cuda-8.0/bin
CFLAGS=-I$(PREFIX)/include/THC -I$(PREFIX)/include/TH -I$(PREFIX)/include
LDFLAGS_NVCC=-L$(PREFIX)/lib -Xlinker -rpath,$(PREFIX)/lib -lluaT -lTHC -lTH -lpng

all: libcuda_utils.so

libcuda_utils.so: cuda_utils.cu
	$(NVCC_PREFIX)/nvcc -arch sm_35 -O3 -DNDEBUG --compiler-options '-fPIC' -o libcuda_utils.so --shared cuda_utils.cu $(CFLAGS) $(LDFLAGS_NVCC)

clean:
	find . -type f | xargs -n 5 touch
	rm -f libcuda_utils.so
-- The original Neural-Style code can be found here: https://github.com/jcjohnson/neural-style
-- Matting laplacian code from: github.com/martinbenson/deep-photo-styletransfer/
-- Generate the laplacian with: https://gist.github.com/ProGamerGov/290f26afccc5e013d1a8425ef6a594f2
-- To use the laplacian feature, first get the required file "libcuda_utils.so" from: github.com/martinbenson/deep-photo-styletransfer,
-- and place it in your Neural-Style directory.
-- Then configure the makefile script so that the first two lines: https://gist.github.com/ProGamerGov/64c03b70db4fbac80dbf00ed047eccb8#file-makefile-L1-L2,
-- are set up to match your directory location and your CUDA version. Run the makefile script via: make clean && make
-- For convenience, the makefile script and the laplacian creator/generator script have been added to this gist. You still have to build
-- libcuda_utils.so for the laplacian feature to work.
-- Install the required luarocks dependency via: luarocks install csvigo
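-- Example invocation (hypothetical file names; the -width passed to laplacian.py
-- must match -image_size here, and laplacian.py names its output "<name>_<width>.csv"):
--   th neural_style_laplacian.lua -content_image examples/inputs/in1.png \
--     -style_image examples/inputs/style1.png \
--     -laplacian laplacians/in1_512.csv -image_size 512 -gpu 0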
require 'torch'
require 'nn'
require 'image'
require 'optim'

require 'loadcaffe'

local cmd = torch.CmdLine()

-- Basic options
cmd:option('-style_image', 'examples/inputs/seated-nude.jpg',
           'Style target image')
cmd:option('-style_blend_weights', 'nil')
cmd:option('-content_image', 'examples/inputs/tubingen.jpg',
           'Content target image')
cmd:option('-image_size', 512, 'Maximum height / width of generated image')
cmd:option('-gpu', '0', 'Zero-indexed ID of the GPU to use; for CPU mode set -gpu = -1')
cmd:option('-multigpu_strategy', '', 'Index of layers to split the network across GPUs')

-- Optimization options
cmd:option('-content_weight', 5e0)
cmd:option('-style_weight', 1e2)
cmd:option('-tv_weight', 1e-3)
cmd:option('-num_iterations', 1000)
cmd:option('-normalize_gradients', false)
cmd:option('-init', 'random', 'random|image')
cmd:option('-init_image', '')
cmd:option('-optimizer', 'lbfgs', 'lbfgs|adam')
cmd:option('-learning_rate', 1e1)
cmd:option('-lbfgs_num_correction', 0)

-- Output options
cmd:option('-print_iter', 50)
cmd:option('-save_iter', 100)
cmd:option('-output_image', 'out.png')

-- Other options
cmd:option('-style_scale', 1.0)
cmd:option('-original_colors', 0)
cmd:option('-pooling', 'max', 'max|avg')
cmd:option('-proto_file', 'models/VGG_ILSVRC_19_layers_deploy.prototxt')
cmd:option('-model_file', 'models/VGG_ILSVRC_19_layers.caffemodel')
cmd:option('-backend', 'nn', 'nn|cudnn|clnn')
cmd:option('-cudnn_autotune', false)
cmd:option('-seed', -1)

cmd:option('-content_layers', 'relu4_2', 'layers for content')
cmd:option('-style_layers', 'relu1_1,relu2_1,relu3_1,relu4_1,relu5_1', 'layers for style')

-- Experimental photorealism-related parameters
cmd:option('-laplacian', '', 'Laplacian generated from your content image')
-- Local affine params
cmd:option('-lambda', 1e4)
cmd:option('-patch', 3)
cmd:option('-eps', 1e-7)
-- Reconstruct best local affine using joint bilateral smoothing
cmd:option('-f_radius', 7)
cmd:option('-f_edge', 0.05)
cmd:option('-index', 1)
cmd:option('-serial', 'serial_example')
local function main(params)
  local dtype, multigpu = setup_gpu(params)

  local loadcaffe_backend = params.backend
  if params.backend == 'clnn' then loadcaffe_backend = 'nn' end
  local cnn = loadcaffe.load(params.proto_file, params.model_file, loadcaffe_backend):type(dtype)

  local content_image = image.load(params.content_image, 3)
  content_image = image.scale(content_image, params.image_size, 'bilinear')
  local content_image_caffe = preprocess(content_image):float()

  local style_size = math.ceil(params.style_scale * params.image_size)
  local style_image_list = params.style_image:split(',')
  local style_images_caffe = {}
  for _, img_path in ipairs(style_image_list) do
    local img = image.load(img_path, 3)
    img = image.scale(img, style_size, 'bilinear')
    local img_caffe = preprocess(img):float()
    table.insert(style_images_caffe, img_caffe)
  end

  local init_image = nil
  if params.init_image ~= '' then
    init_image = image.load(params.init_image, 3)
    local H, W = content_image:size(2), content_image:size(3)
    init_image = image.scale(init_image, W, H, 'bilinear')
    init_image = preprocess(init_image):float()
  end

  local CSR
  local c, h, w
  if params.laplacian ~= '' then
    -- Load the matting laplacian CSV (first line: entry count; then row,col,val triplets)
    local CSR_fn = params.laplacian
    print('loading matting laplacian...', CSR_fn)
    local csvFile = io.open(CSR_fn, 'r')
    local ROWS = tonumber(csvFile:read())
    CSR = torch.Tensor(ROWS, 3)
    local i = 0
    for line in csvFile:lines('*l') do
      i = i + 1
      local l = line:split(',')
      for key, val in ipairs(l) do
        CSR[i][key] = val
      end
    end
    csvFile:close()

    paths.mkdir(tostring(params.serial))
    print('Exp serial:', params.serial)

    c, h, w = content_image:size(1), content_image:size(2), content_image:size(3)
    require 'libcuda_utils'
  end

  -- Handle style blending weights for multiple style inputs
  local style_blend_weights = nil
  if params.style_blend_weights == 'nil' then
    -- Style blending not specified, so use equal weighting
    style_blend_weights = {}
    for i = 1, #style_image_list do
      table.insert(style_blend_weights, 1.0)
    end
  else
    style_blend_weights = params.style_blend_weights:split(',')
    assert(#style_blend_weights == #style_image_list,
      '-style_blend_weights and -style_images must have the same number of elements')
  end

  -- Normalize the style blending weights so they sum to 1
  local style_blend_sum = 0
  for i = 1, #style_blend_weights do
    style_blend_weights[i] = tonumber(style_blend_weights[i])
    style_blend_sum = style_blend_sum + style_blend_weights[i]
  end
  for i = 1, #style_blend_weights do
    style_blend_weights[i] = style_blend_weights[i] / style_blend_sum
  end

  local content_layers = params.content_layers:split(",")
  local style_layers = params.style_layers:split(",")

  -- Set up the network, inserting style and content loss modules
  local content_losses, style_losses = {}, {}
  local next_content_idx, next_style_idx = 1, 1
  local net = nn.Sequential()
  if params.tv_weight > 0 then
    local tv_mod = nn.TVLoss(params.tv_weight):type(dtype)
    net:add(tv_mod)
  end
  for i = 1, #cnn do
    if next_content_idx <= #content_layers or next_style_idx <= #style_layers then
      local layer = cnn:get(i)
      local name = layer.name
      local layer_type = torch.type(layer)
      local is_pooling = (layer_type == 'cudnn.SpatialMaxPooling' or layer_type == 'nn.SpatialMaxPooling')
      if is_pooling and params.pooling == 'avg' then
        assert(layer.padW == 0 and layer.padH == 0)
        local kW, kH = layer.kW, layer.kH
        local dW, dH = layer.dW, layer.dH
        local avg_pool_layer = nn.SpatialAveragePooling(kW, kH, dW, dH):type(dtype)
        local msg = 'Replacing max pooling at layer %d with average pooling'
        print(string.format(msg, i))
        net:add(avg_pool_layer)
      else
        net:add(layer)
      end
      if name == content_layers[next_content_idx] then
        print("Setting up content layer", i, ":", layer.name)
        local norm = params.normalize_gradients
        local loss_module = nn.ContentLoss(params.content_weight, norm):type(dtype)
        net:add(loss_module)
        table.insert(content_losses, loss_module)
        next_content_idx = next_content_idx + 1
      end
      if name == style_layers[next_style_idx] then
        print("Setting up style layer  ", i, ":", layer.name)
        local norm = params.normalize_gradients
        local loss_module = nn.StyleLoss(params.style_weight, norm):type(dtype)
        net:add(loss_module)
        table.insert(style_losses, loss_module)
        next_style_idx = next_style_idx + 1
      end
    end
  end
  if multigpu then
    net = setup_multi_gpu(net, params)
  end
  net:type(dtype)

  -- Capture content targets
  for i = 1, #content_losses do
    content_losses[i].mode = 'capture'
  end
  print 'Capturing content targets'
  print(net)
  content_image_caffe = content_image_caffe:type(dtype)
  net:forward(content_image_caffe:type(dtype))

  -- Capture style targets
  for i = 1, #content_losses do
    content_losses[i].mode = 'none'
  end
  for i = 1, #style_images_caffe do
    print(string.format('Capturing style target %d', i))
    for j = 1, #style_losses do
      style_losses[j].mode = 'capture'
      style_losses[j].blend_weight = style_blend_weights[i]
    end
    net:forward(style_images_caffe[i]:type(dtype))
  end

  -- Set all loss modules to loss mode
  for i = 1, #content_losses do
    content_losses[i].mode = 'loss'
  end
  for i = 1, #style_losses do
    style_losses[i].mode = 'loss'
  end

  -- We don't need the base CNN anymore, so clean it up to save memory.
  cnn = nil
  for i = 1, #net.modules do
    local module = net.modules[i]
    if torch.type(module) == 'nn.SpatialConvolutionMM' then
      -- remove these, not used, but uses gpu memory
      module.gradWeight = nil
      module.gradBias = nil
    end
  end
  collectgarbage()

  -- Initialize the image
  if params.seed >= 0 then
    torch.manualSeed(params.seed)
  end
  local img = nil
  if params.init == 'random' then
    img = torch.randn(content_image:size()):float():mul(0.001)
  elseif params.init == 'image' then
    if init_image then
      img = init_image:clone()
    else
      img = content_image_caffe:clone()
    end
  else
    error('Invalid init type')
  end
  img = img:type(dtype)

  local mean_pixel
  local meanImage
  if params.laplacian ~= '' then
    mean_pixel = torch.CudaTensor({103.939, 116.779, 123.68})
    meanImage = mean_pixel:view(3, 1, 1):expandAs(content_image_caffe)
  end
  -- Run it through the network once to get the proper size for the gradient
  -- All the gradients will come from the extra loss modules, so we just pass
  -- zeros into the top of the net on the backward pass.
  local y = net:forward(img)
  local dy = img.new(#y):zero()

  -- Declaring this here lets us access it in maybe_print
  local optim_state = nil
  if params.optimizer == 'lbfgs' then
    optim_state = {
      maxIter = params.num_iterations,
      verbose = true,
      tolX = -1,
      tolFun = -1,
    }
    if params.lbfgs_num_correction > 0 then
      optim_state.nCorrection = params.lbfgs_num_correction
    end
  elseif params.optimizer == 'adam' then
    optim_state = {
      learningRate = params.learning_rate,
    }
  else
    error(string.format('Unrecognized optimizer "%s"', params.optimizer))
  end

  local function maybe_print(t, loss)
    local verbose = (params.print_iter > 0 and t % params.print_iter == 0)
    if verbose then
      print(string.format('Iteration %d / %d', t, params.num_iterations))
      for i, loss_module in ipairs(content_losses) do
        print(string.format('  Content %d loss: %f', i, loss_module.loss))
      end
      for i, loss_module in ipairs(style_losses) do
        print(string.format('  Style %d loss: %f', i, loss_module.loss))
      end
      print(string.format('  Total loss: %f', loss))
    end
  end

  local function maybe_save(t)
    local should_save = params.save_iter > 0 and t % params.save_iter == 0
    should_save = should_save or t == params.num_iterations
    if should_save then
      local disp = deprocess(img:double())
      disp = image.minmax{tensor=disp, min=0, max=1}
      local filename = build_filename(params.output_image, t)
      if t == params.num_iterations then
        filename = params.output_image
      end

      -- Maybe perform postprocessing for color-independent style transfer
      if params.original_colors == 1 then
        disp = original_colors(content_image, disp)
      end

      image.save(filename, disp)
    end
  end

  -- Function to evaluate loss and gradient. We run the net forward and
  -- backward to get the gradient, and sum up losses from the loss modules.
  -- optim.lbfgs internally handles iteration and calls this function many
  -- times, so we manually count the number of iterations to handle printing
  -- and saving intermediate results.
  local num_calls = 0
  local function feval(x)
    num_calls = num_calls + 1

    local grad
    if params.laplacian ~= '' then
      -- The affine/laplacian terms work on the mean-restored [0, 255] images
      local output = torch.add(img, meanImage)
      local input = torch.add(content_image_caffe, meanImage)

      net:forward(img)
      local gradient_VggNetwork = net:updateGradInput(img, dy)
      local gradient_LocalAffine = MattingLaplacian(output, CSR, h, w):mul(params.lambda)

      if num_calls % params.save_iter == 0 then
        local best = SmoothLocalAffine(output, input, params.eps, params.patch, h, w, params.f_radius, params.f_edge)
        fn = params.serial .. '/best' .. tostring(params.index) .. '_t_' .. tostring(num_calls) .. '.png'
        image.save(fn, best)
      end

      grad = torch.add(gradient_VggNetwork, gradient_LocalAffine)
    else
      net:forward(x)
      grad = net:updateGradInput(x, dy)
    end

    local loss = 0
    for _, mod in ipairs(content_losses) do
      loss = loss + mod.loss
    end
    for _, mod in ipairs(style_losses) do
      loss = loss + mod.loss
    end
    maybe_print(num_calls, loss)
    maybe_save(num_calls)

    collectgarbage()
    -- optim.lbfgs expects a vector for gradients
    return loss, grad:view(grad:nElement())
  end

  -- Run optimization.
  if params.optimizer == 'lbfgs' then
    print('Running optimization with L-BFGS')
    local x, losses = optim.lbfgs(feval, img, optim_state)
  elseif params.optimizer == 'adam' then
    print('Running optimization with ADAM')
    for t = 1, params.num_iterations do
      local x, losses = optim.adam(feval, img, optim_state)
    end
  end
end
function setup_gpu(params)
  local multigpu = false
  if params.gpu:find(',') then
    multigpu = true
    params.gpu = params.gpu:split(',')
    for i = 1, #params.gpu do
      params.gpu[i] = tonumber(params.gpu[i]) + 1
    end
  else
    params.gpu = tonumber(params.gpu) + 1
  end
  local dtype = 'torch.FloatTensor'
  if multigpu or params.gpu > 0 then
    if params.backend ~= 'clnn' then
      require 'cutorch'
      require 'cunn'
      if multigpu then
        cutorch.setDevice(params.gpu[1])
      else
        cutorch.setDevice(params.gpu)
      end
      dtype = 'torch.CudaTensor'
    else
      require 'clnn'
      require 'cltorch'
      if multigpu then
        cltorch.setDevice(params.gpu[1])
      else
        cltorch.setDevice(params.gpu)
      end
      dtype = torch.Tensor():cl():type()
    end
  else
    params.backend = 'nn'
  end

  if params.backend == 'cudnn' then
    require 'cudnn'
    if params.cudnn_autotune then
      cudnn.benchmark = true
    end
    cudnn.SpatialConvolution.accGradParameters = nn.SpatialConvolutionMM.accGradParameters -- ie: nop
  end
  return dtype, multigpu
end

function setup_multi_gpu(net, params)
  local DEFAULT_STRATEGIES = {
    [2] = {3},
  }
  local gpu_splits = nil
  if params.multigpu_strategy == '' then
    -- Use a default strategy
    gpu_splits = DEFAULT_STRATEGIES[#params.gpu]
    -- Offset the default strategy by one if we are using TV
    if params.tv_weight > 0 then
      for i = 1, #gpu_splits do gpu_splits[i] = gpu_splits[i] + 1 end
    end
  else
    -- Use the user-specified multigpu strategy
    gpu_splits = params.multigpu_strategy:split(',')
    for i = 1, #gpu_splits do
      gpu_splits[i] = tonumber(gpu_splits[i])
    end
  end
  assert(gpu_splits ~= nil, 'Must specify -multigpu_strategy')
  local gpus = params.gpu

  local cur_chunk = nn.Sequential()
  local chunks = {}
  for i = 1, #net do
    cur_chunk:add(net:get(i))
    if i == gpu_splits[1] then
      table.remove(gpu_splits, 1)
      table.insert(chunks, cur_chunk)
      cur_chunk = nn.Sequential()
    end
  end
  table.insert(chunks, cur_chunk)
  assert(#chunks == #gpus)

  local new_net = nn.Sequential()
  for i = 1, #chunks do
    local out_device = nil
    if i == #chunks then
      out_device = gpus[1]
    end
    new_net:add(nn.GPU(chunks[i], gpus[i], out_device))
  end
  return new_net
end

-- Matting Laplacian Related Functions:
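-- MattingLaplacian and SmoothLocalAffine wrap the CUDA functions exported by
-- libcuda_utils.so. Both scale the mean-restored [0, 255] images down by 256
-- to work in roughly [0, 1]; MattingLaplacian divides its result by 256 again
-- so the returned gradient is taken with respect to the unscaled image.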
function MattingLaplacian(output, CSR, h, w)
  local N, c = CSR:size(1), CSR:size(2)
  local CSR_rowIdx = torch.CudaIntTensor(N):copy(torch.round(CSR[{{1,-1},1}]))
  local CSR_colIdx = torch.CudaIntTensor(N):copy(torch.round(CSR[{{1,-1},2}]))
  local CSR_val    = torch.CudaTensor(N):copy(CSR[{{1,-1},3}])

  local output01 = torch.div(output, 256.0)
  local grad = cuda_utils.matting_laplacian(output01, h, w, CSR_rowIdx, CSR_colIdx, CSR_val, N)

  grad:div(256.0)
  return grad
end

function SmoothLocalAffine(output, input, epsilon, patch, h, w, f_r, f_e)
  local output01 = torch.div(output, 256.0)
  local input01 = torch.div(input, 256.0)

  local filter_radius = f_r
  local sigma1, sigma2 = filter_radius / 3, f_e

  local best01 = cuda_utils.smooth_local_affine(output01, input01, epsilon, patch, h, w, filter_radius, sigma1, sigma2)
  return best01
end

-- Note: ErrorMapLocalAffine is unused in this script and relies on
-- cuda_utils.error_map_local_affine, which is not exported by the
-- cuda_utils.cu included in this gist.
function ErrorMapLocalAffine(output, input, epsilon, patch, h, w)
  local output01 = torch.div(output, 256.0)
  local input01 = torch.div(input, 256.0)
  local err_map, best01, Mt_M, invMt_M = cuda_utils.error_map_local_affine(output01, input01, epsilon, patch, h, w)
  return err_map, best01
end
function build_filename(output_image, iteration)
  local ext = paths.extname(output_image)
  local basename = paths.basename(output_image, ext)
  local directory = paths.dirname(output_image)
  return string.format('%s/%s_%d.%s', directory, basename, iteration, ext)
end

-- Preprocess an image before passing it to a Caffe model.
-- We need to rescale from [0, 1] to [0, 255], convert from RGB to BGR,
-- and subtract the mean pixel.
function preprocess(img)
  local mean_pixel = torch.DoubleTensor({103.939, 116.779, 123.68})
  local perm = torch.LongTensor{3, 2, 1}
  img = img:index(1, perm):mul(256.0)
  mean_pixel = mean_pixel:view(3, 1, 1):expandAs(img)
  img:add(-1, mean_pixel)
  return img
end

-- Undo the above preprocessing.
function deprocess(img)
  local mean_pixel = torch.DoubleTensor({103.939, 116.779, 123.68})
  mean_pixel = mean_pixel:view(3, 1, 1):expandAs(img)
  img = img + mean_pixel
  local perm = torch.LongTensor{3, 2, 1}
  img = img:index(1, perm):div(256.0)
  return img
end

-- Combine the Y channel of the generated image and the UV channels of the
-- content image to perform color-independent style transfer.
function original_colors(content, generated)
  local generated_y = image.rgb2yuv(generated)[{{1, 1}}]
  local content_uv = image.rgb2yuv(content)[{{2, 3}}]
  return image.yuv2rgb(torch.cat(generated_y, content_uv, 1))
end

-- Define an nn Module to compute content loss in-place
local ContentLoss, parent = torch.class('nn.ContentLoss', 'nn.Module')

function ContentLoss:__init(strength, normalize)
  parent.__init(self)
  self.strength = strength
  self.target = torch.Tensor()
  self.normalize = normalize or false
  self.loss = 0
  self.crit = nn.MSECriterion()
  self.mode = 'none'
end

function ContentLoss:updateOutput(input)
  if self.mode == 'loss' then
    self.loss = self.crit:forward(input, self.target) * self.strength
  elseif self.mode == 'capture' then
    self.target:resizeAs(input):copy(input)
  end
  self.output = input
  return self.output
end

function ContentLoss:updateGradInput(input, gradOutput)
  if self.mode == 'loss' then
    if input:nElement() == self.target:nElement() then
      self.gradInput = self.crit:backward(input, self.target)
    end
    if self.normalize then
      self.gradInput:div(torch.norm(self.gradInput, 1) + 1e-8)
    end
    self.gradInput:mul(self.strength)
    self.gradInput:add(gradOutput)
  else
    self.gradInput:resizeAs(gradOutput):copy(gradOutput)
  end
  return self.gradInput
end

local Gram, parent = torch.class('nn.GramMatrix', 'nn.Module')

function Gram:__init()
  parent.__init(self)
end

function Gram:updateOutput(input)
  assert(input:dim() == 3)
  local C, H, W = input:size(1), input:size(2), input:size(3)
  local x_flat = input:view(C, H * W)
  self.output:resize(C, C)
  self.output:mm(x_flat, x_flat:t())
  return self.output
end

function Gram:updateGradInput(input, gradOutput)
  assert(input:dim() == 3 and input:size(1))
  local C, H, W = input:size(1), input:size(2), input:size(3)
  local x_flat = input:view(C, H * W)
  self.gradInput:resize(C, H * W):mm(gradOutput, x_flat)
  self.gradInput:addmm(gradOutput:t(), x_flat)
  self.gradInput = self.gradInput:view(C, H, W)
  return self.gradInput
end

-- Define an nn Module to compute style loss in-place
local StyleLoss, parent = torch.class('nn.StyleLoss', 'nn.Module')

function StyleLoss:__init(strength, normalize)
  parent.__init(self)
  self.normalize = normalize or false
  self.strength = strength
  self.target = torch.Tensor()
  self.mode = 'none'
  self.loss = 0

  self.gram = nn.GramMatrix()
  self.blend_weight = nil
  self.G = nil
  self.crit = nn.MSECriterion()
end

function StyleLoss:updateOutput(input)
  self.G = self.gram:forward(input)
  self.G:div(input:nElement())
  if self.mode == 'capture' then
    if self.blend_weight == nil then
      self.target:resizeAs(self.G):copy(self.G)
    elseif self.target:nElement() == 0 then
      self.target:resizeAs(self.G):copy(self.G):mul(self.blend_weight)
    else
      self.target:add(self.blend_weight, self.G)
    end
  elseif self.mode == 'loss' then
    self.loss = self.strength * self.crit:forward(self.G, self.target)
  end
  self.output = input
  return self.output
end

function StyleLoss:updateGradInput(input, gradOutput)
  if self.mode == 'loss' then
    local dG = self.crit:backward(self.G, self.target)
    dG:div(input:nElement())
    self.gradInput = self.gram:backward(input, dG)
    if self.normalize then
      self.gradInput:div(torch.norm(self.gradInput, 1) + 1e-8)
    end
    self.gradInput:mul(self.strength)
    self.gradInput:add(gradOutput)
  else
    self.gradInput = gradOutput
  end
  return self.gradInput
end

local TVLoss, parent = torch.class('nn.TVLoss', 'nn.Module')

function TVLoss:__init(strength)
  parent.__init(self)
  self.strength = strength
  self.x_diff = torch.Tensor()
  self.y_diff = torch.Tensor()
end

function TVLoss:updateOutput(input)
  self.output = input
  return self.output
end

-- TV loss backward pass inspired by kaishengtai/neuralart
function TVLoss:updateGradInput(input, gradOutput)
  self.gradInput:resizeAs(input):zero()
  local C, H, W = input:size(1), input:size(2), input:size(3)
  self.x_diff:resize(3, H - 1, W - 1)
  self.y_diff:resize(3, H - 1, W - 1)
  self.x_diff:copy(input[{{}, {1, -2}, {1, -2}}])
  self.x_diff:add(-1, input[{{}, {1, -2}, {2, -1}}])
  self.y_diff:copy(input[{{}, {1, -2}, {1, -2}}])
  self.y_diff:add(-1, input[{{}, {2, -1}, {1, -2}}])
  self.gradInput[{{}, {1, -2}, {1, -2}}]:add(self.x_diff):add(self.y_diff)
  self.gradInput[{{}, {1, -2}, {2, -1}}]:add(-1, self.x_diff)
  self.gradInput[{{}, {2, -1}, {1, -2}}]:add(-1, self.y_diff)
  self.gradInput:mul(self.strength)
  self.gradInput:add(gradOutput)
  return self.gradInput
end

function TVGradient(input, gradOutput, strength)
  local C, H, W = input:size(1), input:size(2), input:size(3)
  local gradInput = torch.CudaTensor(C, H, W):zero()
  local x_diff = torch.CudaTensor()
  local y_diff = torch.CudaTensor()
  x_diff:resize(3, H - 1, W - 1)
  y_diff:resize(3, H - 1, W - 1)
  x_diff:copy(input[{{}, {1, -2}, {1, -2}}])
  x_diff:add(-1, input[{{}, {1, -2}, {2, -1}}])
  y_diff:copy(input[{{}, {1, -2}, {1, -2}}])
  y_diff:add(-1, input[{{}, {2, -1}, {1, -2}}])
  gradInput[{{}, {1, -2}, {1, -2}}]:add(x_diff):add(y_diff)
  gradInput[{{}, {1, -2}, {2, -1}}]:add(-1, x_diff)
  gradInput[{{}, {2, -1}, {1, -2}}]:add(-1, y_diff)
  gradInput:mul(strength)
  gradInput:add(gradOutput)
  return gradInput
end

local params = cmd:parse(arg)
main(params)