Skip to content

Instantly share code, notes, and snippets.

View creotiv's full-sized avatar

Andrey Nikishaev creotiv

View GitHub Profile
# Load the demo image pair for the curve-fitting example.
# OpenCV reads images in BGR channel order; [..., ::-1] reverses the
# last axis to convert BGR -> RGB. NOTE(review): cv2.imread returns
# None on a missing file, which would make the slice raise TypeError.
imgx = cv2.imread('img_poly.jpg')[...,::-1] # OpenCV reads BGR, so flip channels to RGB
imgy = cv2.imread('img_poly_m.jpg')[...,::-1] # OpenCV reads BGR, so flip channels to RGB
# https://en.wikipedia.org/wiki/Polynomial_interpolation
def vander_matrix_1d(x, deg):
    """Return the pseudo-Vandermonde matrix of degree `deg` for points `x`.

    The result has shape ``x.shape + (deg + 1,)`` with ``V[..., i] = x**i``,
    so ``V @ coeffs`` evaluates a polynomial with ascending-order
    coefficients at every sample point.

    Parameters
    ----------
    x : array_like
        Sample points (any shape; promoted to a float ndarray of at
        least one dimension).
    deg : int
        Highest power to include; must be >= 0.

    Raises
    ------
    ValueError
        If ``deg`` is negative.
    """
    # https://en.wikipedia.org/wiki/Vandermonde_matrix
    if deg < 0:
        raise ValueError("deg must be non-negative")
    # The original used np.array(x, copy=False), which errors under
    # NumPy 2.0 when a copy is required; atleast_1d + asarray is
    # version-safe. The "+ 0.0" promotes integer input to float.
    x = np.atleast_1d(np.asarray(x)) + 0.0
    dims = (deg + 1,) + x.shape
    v = np.empty(dims, dtype=x.dtype)
    v[0] = x * 0 + 1.0  # x**0 column, broadcast to x's shape
    if deg > 0:
        v[1] = x
        # Build each power from the previous one: x**i = x**(i-1) * x.
        for i in range(2, deg + 1):
            v[i] = v[i - 1] * x
    # Move the powers axis last so rows index samples, columns index powers.
    return np.moveaxis(v, 0, -1)
@creotiv
creotiv / poly2.py
Last active July 1, 2019 16:06
image curves
# Load the original image and its tone-curve-adjusted counterpart.
# OpenCV reads BGR; [..., ::-1] reverses the channel axis to RGB.
imgx = cv2.imread('img_poly.jpg')[...,::-1] # OpenCV reads BGR, so flip channels to RGB
imgy = cv2.imread('img_poly_m.jpg')[...,::-1] # OpenCV reads BGR, so flip channels to RGB
# For each channel, fit a degree-3 polynomial mapping source pixel
# intensities to target intensities — this recovers the per-channel
# transfer curve applied between the two images.
x, y = imgx[..., 0].reshape(-1), imgy[..., 0].reshape(-1)
# polyfit returns coefficients in descending power order
r = np.polyfit(x, y, 3)  # red-channel curve coefficients
x, y = imgx[..., 1].reshape(-1), imgy[..., 1].reshape(-1)
g = np.polyfit(x, y, 3)  # green-channel curve coefficients
x, y = imgx[..., 2].reshape(-1), imgy[..., 2].reshape(-1)
b = np.polyfit(x, y, 3)  # blue-channel fit (missing in the original snippet)
@creotiv
creotiv / poly1.py
Last active July 1, 2019 16:07
Simple polynomial
# Demo: recover the coefficients of a known polynomial with np.polyfit.
x = np.linspace(-1,1,51)
y = 3.14*x**2 + 2.11*x + 34  # quadratic ground truth
# Finding the coefficients of the polynomial. The degree must match the
# generating polynomial: the original used deg=1, which underfits the
# quadratic data above (the commented alternative below already used 2).
r = np.polyfit(x,y, 2)
# NOTE(review): the author reported np.polynomial.polynomial.polyfit
# misbehaving; it returns ascending-order coefficients, unlike np.polyfit.
# r = np.polynomial.polynomial.polyfit(x,y, 2)
# r[::-1] reverses descending-order coefficients to ascending order.
print('Polynom coeffs',r[::-1])
#building function from coefs
@creotiv
creotiv / dstyle.py
Created June 18, 2019 18:10
Dynamic style with .to(device) works ok
# Prepare the style image once: load it, apply the preprocessing
# transform, and tile it so each batch element gets a copy.
# NOTE(review): unlike the "static style" variant, this snippet does not
# move `style` to the device here — presumably done later; confirm
# against the full script.
style = load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1)
# Training loop — the loop body is truncated in this excerpt and the
# original indentation was lost when the snippet was captured.
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.  # running content-loss accumulator for the epoch
agg_style_loss = 0.    # running style-loss accumulator for the epoch
count = 0              # number of samples processed this epoch
for batch_id, (x, _) in enumerate(train_loader):
@creotiv
creotiv / sstyle.py
Created June 18, 2019 17:39
static style
# "Static style": compute the style image's Gram matrices ONCE, before
# the training loop, instead of recomputing them per batch.
style = load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
# div_ scales pixel values in place to [0, 1] before VGG normalization.
# NOTE(review): `.style` on the VGG output suggests a namedtuple of
# layer activations — confirm against the model definition.
features_style = vgg(normalize_batch(style.div_(255.0))).style
gram_style = [gram_matrix(y) for y in features_style]
# Training loop — body truncated in this excerpt; original indentation
# was lost when the snippet was captured.
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.  # running content-loss accumulator for the epoch
agg_style_loss = 0.    # running style-loss accumulator for the epoch
@creotiv
creotiv / dstyle.py
Created June 18, 2019 17:39
dynamic style gram
# "Dynamic style gram": the style image is prepared and moved to the
# device up front, but (unlike the static variant) its Gram matrices are
# presumably recomputed inside the loop — truncated here, so confirm.
style = load_image(args.style_image, size=args.style_size)
style = style_transform(style)
style = style.repeat(args.batch_size, 1, 1, 1).to(device)
# Training loop — body truncated in this excerpt; original indentation
# was lost when the snippet was captured.
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.  # running content-loss accumulator for the epoch
agg_style_loss = 0.    # running style-loss accumulator for the epoch
count = 0              # number of samples processed this epoch
for batch_id, (x, _) in enumerate(train_loader):
# FiLM-style conditioning setup: a feature tensor and a context tensor,
# both with 128 channels and dynamic spatial dims.
ft_in = keras.Input((None,None,128))
context_in = keras.Input((None,None,128))
# tf.shape reads the runtime (dynamic) shape, since the static spatial
# dims are None.
batch = tf.shape(context_in)[0]
height = tf.shape(context_in)[1]
width = tf.shape(context_in)[2]
channels = tf.shape(context_in)[3]
# Flatten the context's spatial and channel dims into one vector per
# batch element.
context = tf.reshape(context_in, (batch,width*height*channels))
# Produce FiLM parameters from the context. Truncated in this excerpt
# (only the gamma branch is visible); indentation was lost in capture.
def made(x):
feature_size = 128
film_params_gammas = tf.keras.layers.Dense(feature_size)(x[0])
# Alternative FiLM parameter head: flatten the 3-channel context into a
# 1x1xN "image" and apply a 1x1 convolution to emit gammas and betas.
context = tf.keras.layers.Input((None, None, 3))
# Runtime shape components (static spatial dims are None).
height = tf.shape(context)[1]
width = tf.shape(context)[2]
channels = tf.shape(context)[3]
batch = tf.shape(context)[0]
feature_size=128
# Collapse spatial+channel dims into the last axis; keeps a singleton
# spatial layout so a Conv2D can act as a dense layer per batch element.
context = tf.reshape(context, [batch, 1, -1, height*width*channels])
# context = tf.keras.layers.GlobalAveragePooling2D()(context)
# 2*feature_size outputs: one gamma and one beta per feature channel —
# presumably split downstream; confirm against the full model.
film_params = tf.keras.layers.Conv2D(2*feature_size, 1)(context)
from fabric2 import task, Connection
from fabric2.transfer import Transfer
import time
import getpass
# Deployment target configuration for the fabric tasks in this script.
HOST = 'ec2-13-53-129-145.eu-north-1.compute.amazonaws.com'  # EC2 instance
KEY_FILE = '../aws-sk.pem'  # SSH private key path, relative to this script
# SECURITY NOTE(review): plaintext credentials committed in source.
# Acceptable only for a throwaway demo host — for anything real, read
# these from environment variables or a secrets manager.
DEFAULT_LOGIN = '[email protected]'
DEFAULT_PASS = 'demo'
DEFAULT_USER = 'demo'
# docker-compose excerpt (indentation restored — it was lost when this
# snippet was captured, which makes structure-bearing YAML invalid).
# NOTE: the top-level `version` key is deprecated in modern Compose.
version: '3'
services:
  nginx:
    image: awsdemo-nginx:latest
    ports:
      - "80:80"   # publish container port 80 on host port 80
    volumes:
      # Named volumes shared with the app container for static/media
      # assets; they must be declared in a top-level `volumes:` section
      # beyond this excerpt.
      - static_volume:/usr/src/app/staticfiles
      - media_volume:/usr/src/app/mediafiles