import ggplotnim, numericalnim, arraymancer, sequtils, math
import nimpyArraymancer

proc genRand(): Tensor[float] =
  result = randomTensor([10, 10], 75).asType(float)
  # alternatively, a small fixed test input:
  # [[43.0, 10.0],
  #  [22.0, 75.0]].toTensor()
proc grad(tIn: Tensor[float], xdiff: float = 1.0): Tensor[float] =
  ## Computes the first derivative of a rank-1 tensor by finite differences.
  let t = tIn.squeeze
  doAssert t.rank == 1, "Expected a rank-1 tensor, but got: " & $t
  result = newTensor[float](t.size.int)
  var
    yMinus, yPlus: float
    mxdiff = xdiff
  for i in 0 ..< t.size:
    if i == 0:
      # forward difference at the left edge
      yMinus = t[0]
      yPlus = t[1]
      mxdiff = xdiff
    elif i == t.size - 1:
      # backward difference at the right edge
      yMinus = t[i - 1]
      yPlus = t[i]
      mxdiff = xdiff
    else:
      # central difference in the interior, hence the doubled step
      yMinus = t[i - 1]
      yPlus = t[i + 1]
      mxdiff = xdiff * 2
    result[i] = (yPlus - yMinus) / mxdiff
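
# A quick sanity check of the scheme above (illustrative only, not used by the
# pipeline below): for t = [0, 1, 4, 9], i.e. x^2 sampled at x = 0..3,
#   grad([0.0, 1.0, 4.0, 9.0].toTensor)
# yields [1, 2, 4, 5]: one-sided differences (1 and 5) at the edges and
# central differences (2 and 4) in the interior.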
proc grad(t: Tensor[float], axis: int): Tensor[float] =
  ## Given a 2D tensor, computes the gradient along the given axis.
  result = newTensor[float](t.shape)
  for idx, ax in enumerateAxis(t, axis):
    # TODO: eh, how to do this in one?
    case axis
    of 0: result[idx, _] = grad(ax, 1.0).unsqueeze(0)
    of 1: result[_, idx] = grad(ax, 1.0).unsqueeze(1)
    else: doAssert false, "axis must be 0 or 1"
proc computeAlpha(f: Tensor[float]): Tensor[float] =
  ## Computes the bicubic coefficient matrix for the top-left 2x2 cell of `f`,
  ## deriving the gradients internally.
  var m1 = [[1, 0, 0, 0],
            [0, 0, 1, 0],
            [-3, 3, -2, -1],
            [2, -2, 1, 1]].toTensor.asType(float)
  var m2 = [[1, 0, -3, 2],
            [0, 0, 3, -2],
            [0, 1, -2, 1],
            [0, 0, -1, 1]].toTensor.asType(float)
  let fx = grad(f, 0)
  let fy = grad(f, 1)
  let fxy = grad(grad(f, 0), 1)
  var A = [[f[0, 0], f[0, 1], fy[0, 0], fy[0, 1]],
           [f[1, 0], f[1, 1], fy[1, 0], fy[1, 1]],
           [fx[0, 0], fx[0, 1], fxy[0, 0], fxy[0, 1]],
           [fx[1, 0], fx[1, 1], fxy[1, 0], fxy[1, 1]]].toTensor().asType(float)
  result = m1 * A * m2
proc computeAlpha(f: Tensor[float], fx, fy, fxy: Tensor[float],
                  x, y: int): Tensor[float] =
  ## Computes the bicubic coefficient matrix for the cell whose top-left
  ## corner is `(x, y)`, given precomputed derivatives `fx`, `fy`, `fxy`.
  var m1 = [[1, 0, 0, 0],
            [0, 0, 1, 0],
            [-3, 3, -2, -1],
            [2, -2, 1, 1]].toTensor.asType(float)
  var m2 = [[1, 0, -3, 2],
            [0, 0, 3, -2],
            [0, 1, -2, 1],
            [0, 0, -1, 1]].toTensor.asType(float)
  var A = [[f[x, y], f[x, y + 1], fy[x, y], fy[x, y + 1]],
           [f[x + 1, y], f[x + 1, y + 1], fy[x + 1, y], fy[x + 1, y + 1]],
           [fx[x, y], fx[x, y + 1], fxy[x, y], fxy[x, y + 1]],
           [fx[x + 1, y], fx[x + 1, y + 1], fxy[x + 1, y], fxy[x + 1, y + 1]]].toTensor().asType(float)
  result = m1 * A * m2
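
# Both `computeAlpha` overloads implement the standard matrix form of bicubic
# interpolation on a single cell: with `m1` and `m2` as above and `A` holding
# the function values and derivatives at the four cell corners, the
# coefficient matrix is
#   alpha = m1 * A * m2
# and the interpolated surface at local coordinates (x, y) in [0, 1]^2 is
#   p(x, y) = [1, x, x^2, x^3] * alpha * [1, y, y^2, y^3]^T
# which is exactly the dot product `bicubic` below evaluates per output pixel.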
proc bicubic(t: Tensor[float], size: int): Tensor[float] =
  ## Upscales the 2D tensor `t` to `size x size` using bicubic interpolation.
  result = newTensor[float]([size, size])
  let blockSizeX = size.float / t.shape[0].float
  let blockSizeY = size.float / t.shape[1].float
  let fx = grad(t, 0)
  let fy = grad(t, 1)
  let fxy = grad(grad(t, 0), 1)
  var
    tX = 0
    tY = 0
    alpha = t.computeAlpha(fx, fy, fxy, 0, 0)
  for y in 0 ..< size:
    # clamp the cell index so the `+ 1` accesses in `computeAlpha` stay in
    # bounds for output pixels falling into the last row / column of cells
    let newTy = min((y.float / blockSizeY).floor.int, t.shape[1] - 2)
    for x in 0 ..< size:
      let newTx = min((x.float / blockSizeX).floor.int, t.shape[0] - 2)
      if newTx != tX or newTy != tY:
        # entered a new cell, recompute the bicubic coefficients
        tX = newTx
        tY = newTy
        alpha = t.computeAlpha(fx, fy, fxy, tX, tY)
      # local coordinates relative to the current cell
      let xIn = x.float / blockSizeX - tX.float
      let yIn = y.float / blockSizeY - tY.float
      result[x, y] = dot([1.0, xIn, xIn*xIn, xIn*xIn*xIn].toTensor,
                         alpha * [1.0, yIn, yIn*yIn, yIn*yIn*yIn].toTensor)
proc splitXYZ(t: Tensor[float]): (Tensor[float], Tensor[float], Tensor[float]) =
  ## Flattens a 2D tensor into `(x, y, z)` coordinate tensors for plotting.
  var
    x = newTensor[float](t.size.int)
    y = newTensor[float](t.size.int)
    z = newTensor[float](t.size.int)
  var i = 0
  for idx, val in t:
    x[i] = idx[0].float
    y[i] = idx[1].float
    z[i] = val.float
    inc i
  result = (x, y, z)
proc plot(t: Tensor[float], name: string) =
  ## Plots the tensor as a raster heatmap and saves it to `name & ".pdf"`.
  let (x, y, z) = t.splitXYZ()
  let df = seqsToDf(x, y, z)
  ggplot(df, aes("x", "y", fill = "z")) +
    geom_raster() +
    scale_x_continuous() + scale_y_continuous() +
    scale_fill_continuous(scale = (low: 0.0, high: 70.0)) +
    xlim(0, t.shape[0] + 1) + ylim(0, t.shape[1] + 1) +
    ggsave(name & ".pdf", width = 900, height = 800)
let t = genRand()
echo t
echo grad(grad(t, 0), 1)

const Size = 1000
echo "bicubic calc:"
let res = bicubic(t, Size)
plot(res, "test_cubic")
echo "plot input"
plot(t, "test_input")

echo "calc scipy"
import nimpy
# create a scipy interpolation of the same input for comparison:
let scipy = pyImport("scipy.interpolate")
let (x, y, z) = t.splitXYZ()
let sciInterp = scipy.interp2d(x.toRawSeq, y.toRawSeq, z.toRawSeq, "cubic")
let sciRes = callMethod(sciInterp, "__call__",
                        linspace(0, 9, Size).toRawSeq,
                        linspace(0, 9, Size).toRawSeq)
let np = pyImport("numpy")
let sb = newSafePyBuffer(np.ascontiguousarray(sciRes))
let sciT = toTensor[float](sb)
echo "plot scipy"
plot(sciT, "test_scipy_compare")
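
# A plausible invocation, assuming the file above is saved as `bicubic.nim`
# next to `nimpyArraymancer.nim` (both filenames are assumptions):
#   nim c -d:release -r bicubic.nim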
nimpyArraymancer.nim:
import arraymancer,
       nimpy,
       nimpy / [raw_buffers, py_types]

type
  SafePyBuffer* = ref SafePyBufferObj
    ## Wraps a raw Python buffer together with its shape and ties the
    ## buffer's lifetime to the Nim GC: it is released upon collection.
  SafePyBufferObj = object
    pyBuf*: RawPyBuffer
    shape*: seq[int]

when defined(gcDestructors):
  proc `=destroy`(s: var SafePyBufferObj) =
    if not s.pyBuf.buf.isNil:
      s.pyBuf.release()
else:
  proc finalizer(s: SafePyBuffer) =
    if not s.isNil and not s.pyBuf.buf.isNil:
      s.pyBuf.release()

proc newSafePyBuffer*(a: PyObject): SafePyBuffer =
  ## Acquires a writable, contiguous buffer view of the Python object `a`
  ## and stores its shape.
  when defined(gcDestructors):
    new(result)
  else:
    new(result, finalizer)
  var buf: RawPyBuffer
  getBuffer(a, buf, PyBUF_WRITABLE or PyBUF_ND)
  let shapeArray = cast[ptr UncheckedArray[Py_ssize_t]](buf.shape)
  result.shape = newSeq[int](buf.ndim)
  for i in 0 ..< buf.ndim:
    result.shape[i] = shapeArray[i].int # Py_ssize_t is a csize
  result.pyBuf = buf

proc toTensor*[T](s: SafePyBuffer): Tensor[T] =
  ## Creates a Tensor from the memory of the underlying Python buffer.
  result = fromBuffer[T](s.pyBuf.buf, s.shape)
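
when isMainModule:
  # Minimal usage sketch, assuming numpy is importable from the active Python
  # environment: round-trip a numpy array through the buffer protocol into an
  # arraymancer Tensor.
  let np = pyImport("numpy")
  let arr = np.ascontiguousarray(np.ones([3, 4]))
  let sb = newSafePyBuffer(arr)
  let t = toTensor[float](sb)
  echo t.shape # expected: [3, 4]
  echo t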
Resulting plots (images not included): Input, Scipy, Ours.