Here's the API interface to `bun:sqlite`:
class Database {
constructor(
filename: string,
options?:
| number
| {
readonly?: boolean;
# This is a test (not implementation) of the impact bucketMul has on matrix multiplications | |
# https://kolinko.github.io/effort/bucketmul.html | |
import torch | |
import torch.nn.functional as F | |
import math | |
torch.manual_seed(1337) | |
B = 2 | |
N = 8 | |
M = 16 |
Here's the API interface to `bun:sqlite`:
class Database {
constructor(
filename: string,
options?:
| number
| {
readonly?: boolean;
import time | |
import multiprocessing | |
def test_lock(lock, iterations, shared_value): | |
for _ in range(iterations): | |
with lock: | |
shared_value.value += 1 | |
def benchmark(lock_type, num_processes, iterations_per_process): | |
shared_value = multiprocessing.Value('i', 0) |
import torch | |
def cudagraph(f): | |
_graphs = {} | |
def f_(*args): | |
key = hash(tuple(tuple(a.shape) for a in args)) | |
if key in _graphs: | |
wrapped, *_ = _graphs[key] | |
return wrapped(*args) | |
g = torch.cuda.CUDAGraph() |
// Print a crash banner, then a clickable terminal hyperlink (OSC 8
// escape sequence) pointing at a DuckDuckGo search for the report text.
console.log("Oh no! bun hit an error\n");

const report = 'bun report blah blah blah';
const url = `https://duckduckgo.com/?q=${report}`;
const str = '[click to upload report]';

// OSC 8 hyperlink framing: ESC ] 8 ; ; <url> ESC \ <text> ESC ] 8 ; ; ESC \
const linkOpen = `\u001b]8;;${url}\u001b\\`;
const linkClose = `\u001b]8;;\u001b\\`;
console.log(`${linkOpen}${str}${linkClose}`);
read -p "this script will remove libarrayfire from your system and install all requirements to build from source. continue? [Y/n]" -n 1 -r | |
echo | |
if [[ $REPLY =~ ^[Yy]$ ]] | |
then | |
sudo apt remove libarrayfire-dev libarrayfire-cpu3 libarrayfire-cpu-dev | |
sudo apt install -y libblas-dev liblapack-dev liblapacke-dev libfftw3-dev libboost-all-dev cmake make g++ | |
cd /tmp | |
rm -rf arrayfire | |
git clone https://github.com/arrayfire/arrayfire.git | |
cd arrayfire |
# conv bwd implemented with fwd functions | |
import torch | |
import torch.nn.functional as F | |
def dconv2d(grad, x, w, stride, padding, groups): | |
batch = grad.shape[0] | |
channel_out = grad.shape[1] | |
channel_in = x.shape[1] |
# example of backward pass implemented with only forward functions | |
import torch | |
import torch.nn.functional as F | |
def dconv(grad, x, w, stride, padding, groups): | |
batch = grad.shape[0] | |
channel_out = grad.shape[1] | |
channel_in = x.shape[1] |
# examples of backward passes implemented with fwd functions | |
import torch | |
import torch.nn.functional as F | |
def simple(): | |
print("simple") | |
x = torch.randn(1, 1, 4, 4) | |
x.requires_grad = True |