from clip_text_custom_embedder import text_embeddings
from diffusers import StableDiffusionPipeline
import torch
import numpy as np
import cv2
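clip_text_custom_embedder is a custom module, so its exact interface is an assumption on my part; what is real is that StableDiffusionPipeline accepts precomputed prompt_embeds and negative_prompt_embeds, which is presumably where these embeddings go. A sketch under that assumption:

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Assumed signature: returns conditional / unconditional text embeddings.
cond, uncond = text_embeddings(pipe, "a (red:1.3) cat", "blurry, low quality")

image = pipe(prompt_embeds=cond, negative_prompt_embeds=uncond).images[0]
image.save("out.png")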
def correction(
        img,
        shadow_amount_percent, shadow_tone_percent, shadow_radius,
        highlight_amount_percent, highlight_tone_percent, highlight_radius,
        color_percent
):
    """
    Shadow / highlight correction for an image, in the spirit of Photoshop's
    Shadows/Highlights adjustment. The *_amount_percent, *_tone_percent, and
    *_radius arguments control the strength, tonal range, and blur radius of
    the shadow and highlight adjustments; color_percent controls the color
    correction applied to the adjusted regions.
    """
import torch

def get_jacobian(net, x, noutputs):
    # Compute the Jacobian of `net` at `x` by replicating the input once per
    # output dimension and back-propagating an identity matrix in one pass.
    x = x.squeeze()
    x = x.repeat(noutputs, 1)           # (noutputs, n): one copy per output
    x.requires_grad_(True)
    y = net(x)
    y.backward(torch.eye(noutputs))     # row i receives the grad of output i
    return x.grad.data                  # (noutputs, n) Jacobian matrix
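As a quick sanity check (my own usage example, not part of the snippet): for a linear map x -> Wx the Jacobian is just the weight matrix W.

net = torch.nn.Linear(3, 2, bias=False)
x = torch.randn(1, 3)
J = get_jacobian(net, x, noutputs=2)   # shape (2, 3)
print(torch.allclose(J, net.weight))   # True: Jacobian of x -> Wx is W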
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tmodels
from functools import partial
import collections

# dummy data: 10 batches of images with batch size 16
dataset = [torch.rand(16, 3, 224, 224).cuda() for _ in range(10)]
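The imports of functools.partial and collections suggest the usual forward-hook pattern for harvesting intermediate activations; here is a minimal sketch of that pattern under that assumption (the hook and dict names are mine, not the snippet's):

activations = collections.defaultdict(list)

def save_activation(name, module, inputs, output):
    # Forward hooks receive (module, inputs, output); stash a detached copy.
    activations[name].append(output.detach().cpu())

net = tmodels.resnet18().cuda().eval()
for name, module in net.named_modules():
    if isinstance(module, nn.Conv2d):
        module.register_forward_hook(partial(save_activation, name))

with torch.no_grad():
    for batch in dataset:
        net(batch)

# One list of tensors per conv layer, each entry of shape (16, C, H, W)
print({name: len(feats) for name, feats in activations.items()})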
Here's a simple implementation of bilinear interpolation on tensors using PyTorch.

I wrote this up since I ended up learning a lot about options for interpolation in both the numpy and PyTorch ecosystems. More generally than just interpolation, it's also a nice case study in how PyTorch can magically run very numpy-like code on the GPU (and, by the way, do autodiff for you too).

For interpolation in PyTorch, this open issue calls for more interpolation features. There is now a nn.functional.grid_sample() feature, but at least at first this didn't look like what I needed (we'll come back to this later).

In particular I wanted to take an image, W x H x C, and sample it many times at different random locations. Note also that this is different from upsampling, which samples exhaustively and doesn't give us flexibility in choosing the sample locations.
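To make the setup concrete, here is a minimal sketch of that kind of bilinear sampling (my own condensed illustration assuming a W x H x C tensor, not the gist's actual code): gather the four neighboring pixels of each sample point and blend them by the fractional offsets.

import torch

def bilinear_sample(img, x, y):
    # img: (W, H, C) tensor; x, y: (N,) float coordinates in pixel space.
    x0 = x.floor().long().clamp(0, img.shape[0] - 2)
    y0 = y.floor().long().clamp(0, img.shape[1] - 2)
    x1, y1 = x0 + 1, y0 + 1
    # Fractional offsets within the cell, shaped (N, 1) to broadcast over C.
    wx = (x - x0.float()).unsqueeze(1)
    wy = (y - y0.float()).unsqueeze(1)
    # Blend the four corner pixels.
    return (img[x0, y0] * (1 - wx) * (1 - wy)
            + img[x1, y0] * wx * (1 - wy)
            + img[x0, y1] * (1 - wx) * wy
            + img[x1, y1] * wx * wy)

img = torch.rand(64, 48, 3)           # W x H x C, as in the text
x = torch.rand(1000) * 63             # many random sample locations
y = torch.rand(1000) * 47
samples = bilinear_sample(img, x, y)  # (1000, 3)

The same numpy-like code runs on the GPU by moving img, x, and y to a CUDA device, and gradients flow through the sampling since it is built from differentiable ops.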
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import initializers

slim = tf.contrib.slim

'''
============================================================================
LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation
============================================================================
Based on the paper: https://arxiv.org/pdf/1707.03718.pdf
'''
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import collections
from PIL import Image
import numbers

__author__ = "Wei OUYANG"
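These imports are the usual ingredients of elastic deformation augmentation (Simard et al., 2003): smooth a random displacement field with gaussian_filter, then resample the image with map_coordinates. A minimal sketch of that technique for a 2-D array (my reconstruction, not necessarily this author's code):

def elastic_transform(image, alpha, sigma, random_state=None):
    # Randomly displace every pixel, then smooth the displacement field so
    # neighboring pixels move together (controlled by sigma); alpha scales
    # the overall deformation strength.
    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    dx = gaussian_filter(random_state.rand(*shape) * 2 - 1, sigma) * alpha
    dy = gaussian_filter(random_state.rand(*shape) * 2 - 1, sigma) * alpha
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
    indices = (x + dx).reshape(-1, 1), (y + dy).reshape(-1, 1)
    return map_coordinates(image, indices, order=1).reshape(shape)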
- Curriculum Learning - When training machine learning models, start with easier subtasks and gradually increase the difficulty level of the tasks (see the sketch after this list).
- The motivation comes from the observation that humans and animals seem to learn better when trained with a curriculum-like strategy.
- Link to the paper.
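A minimal sketch of the idea (my own illustration; the difficulty scores and linear pacing schedule are hypothetical): sort the training data by some difficulty measure and let each epoch draw from a progressively larger, harder prefix of it.

import numpy as np

def curriculum_batches(examples, difficulty, n_epochs, batch_size=32, rng=None):
    # Sort examples from easiest to hardest by the given difficulty scores,
    # then let each epoch sample from a growing prefix of that ordering.
    rng = rng or np.random.default_rng()
    order = np.argsort(difficulty)
    for epoch in range(n_epochs):
        frac = (epoch + 1) / n_epochs            # linear pacing schedule
        pool = order[: max(batch_size, int(frac * len(order)))]
        for _ in range(len(pool) // batch_size):
            yield [examples[i] for i in rng.choice(pool, batch_size)]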
import numpy as np

# the 2d array of our samples,
# each component is a category label
a = np.array([[1, 2, 3], [4, 5, 6]])

# the 3d array that will be the one-hot representation;
# a.max() + 1 is the number of labels we have
b = np.zeros((a.shape[0], a.shape[1], a.max() + 1))

# fancy indexing: set b[i, j, a[i, j]] = 1 for every position (i, j)
rows, cols = np.indices(a.shape)
b[rows, cols, a] = 1
A Python version of the MATLAB code in this Stack Overflow answer: https://stackoverflow.com/a/18648210/97160

The example shows how to determine the best-fit plane or surface (a first- or higher-order polynomial) over a set of three-dimensional points. Implemented in Python + NumPy + SciPy + matplotlib.
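For the first-order (plane) case the fit reduces to ordinary least squares on z = a*x + b*y + c; a minimal sketch with NumPy (my condensed version, with synthetic data, not the linked code):

import numpy as np

# Noisy samples of the plane z = 2x - 3y + 5 (synthetic data for illustration)
rng = np.random.default_rng(0)
pts = rng.uniform(-1, 1, size=(100, 2))
z = 2 * pts[:, 0] - 3 * pts[:, 1] + 5 + rng.normal(0, 0.01, 100)

# Design matrix [x, y, 1]; lstsq solves for the coefficients (a, b, c)
A = np.column_stack([pts, np.ones(len(pts))])
coeffs, *_ = np.linalg.lstsq(A, z, rcond=None)
print(coeffs)  # approximately [ 2. -3.  5.]

Higher-order surfaces work the same way: add columns like x**2, x*y, and y**2 to the design matrix and solve the same least-squares problem.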