Skip to content

Instantly share code, notes, and snippets.

View wkcn's full-sized avatar
🐳
Tell Your World 🎵

JackieWu wkcn

🐳
Tell Your World 🎵
  • China
View GitHub Profile
@wkcn
wkcn / ConstantOP.py
Last active July 5, 2018 09:26
ConstantOP for MobulaOP
import mobula_op
import mxnet as mx  # FIX: mx.nd.array is used below but mxnet was never imported


@mobula_op.operator.register(need_top_grad=False)
class ConstantOP:
    """MobulaOP operator that always outputs a fixed constant tensor.

    The constant is materialized once as an MXNet NDArray at construction
    time; the forward pass ignores its input and returns that array.
    """

    def __init__(self, constant):
        # Convert once up front so forward() is a cheap attribute read.
        self.constant = mx.nd.array(constant)

    def forward(self, dummy):
        # The dummy input only exists to give the op a graph dependency;
        # the output is always the stored constant.
        return self.constant

    def backward(self, dy):
        # Output does not depend on the input, so the gradient w.r.t.
        # the dummy input is zero.
        return [0]
@wkcn
wkcn / dlpack_test.py
Last active August 7, 2018 06:59
DLPackTest
import mxnet as mx
import numpy as np
import torch
from torch.utils import dlpack
def test_dlpack():
for dtype in [np.float32, np.int32]:
for shape in [(3, 4, 5, 6), (2, 10), (15,)]:
a = mx.nd.random.uniform(shape = shape)
a_np = a.asnumpy()
@wkcn
wkcn / test_del.py
Last active February 21, 2020 04:27
testdel
import ctypes
from ctypes import *

# Teach ctypes the real signature of PyCapsule_New so calls return an
# actual Python object rather than an opaque integer handle.
capsule_new = ctypes.pythonapi.PyCapsule_New
capsule_new.restype = ctypes.py_object
capsule_new.argtypes = [
    ctypes.c_void_p,  # void *pointer
    ctypes.c_char_p,  # const char *name
    ctypes.c_void_p,  # PyCapsule_Destructor
]


def dfunc(dltensor):
    """No-op destructor placeholder for a DLPack capsule."""
    pass
@wkcn
wkcn / mx_nd_waitall.py
Created August 16, 2018 03:00
MXNet nd.waitall()
import mxnet as mx
from mxnet import nd


class TestOP(mx.operator.CustomOp):
    """Minimal custom op whose only observable effect is a print.

    NOTE(review): presumably this gist demonstrates when MXNet's async
    engine actually executes a CustomOp (e.g. around nd.waitall()) —
    confirm against the rest of the original script.
    """

    def __init__(self):
        super(TestOP, self).__init__()

    def forward(self, is_train, req, in_data, out_data, aux):
        # Side effect only: the printed line proves the op was scheduled
        # and run by the engine. No output tensor is assigned.
        print ("Run test OP")

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # No gradient is computed or propagated.
        pass
@wkcn
wkcn / MobulaOP-tutorial-cn.md
Created September 11, 2018 04:39
这可能是创建自定义C++ Operator最简单的方式 - MobulaOP使用说明

这可能是创建自定义C++ Operator最简单的方式 - MobulaOP使用说明

大家好,我想在这里给大家介绍我的一个项目:MobulaOP. MobulaOP是一个简单且灵活的跨框架算子创建工具。使用MobulaOP, 不需要重新编译深度学习框架的源码,就可以创建自定义的C++算子。而且只需要一份C++代码和简单的定义,自定义算子就可以在CPU和GPU上运行。

之所以建立这个项目,是因为我发现MXNet创建自定义算子的方法不太方便,其他深度学习框架也存在这个问题。 当前,创建自定义算子的方法主要为:

  1. 重新编译深度学习框架的源码 重新编译源码耗时过长。需要了解对应框架的算子实现形式,编写出的代码不适用于其他框架。
@wkcn
wkcn / replace_index.py
Last active November 2, 2018 09:09
replace index
def get_new_code(filename, num):
fin = open(filename)
code = list(fin.readlines())
# template: num - 2
tnum = num - 2
assert 0 <= tnum < len(code), (filename, num, tnum, len(code), code)
tcode = code[tnum]
if 'template' in tcode:
zi = tcode.index('<')
new_tcode = tcode[0:zi+1] # include <
@wkcn
wkcn / test_roi_align.py
Created December 18, 2018 02:22
Test ROI Align OP
import mxnet as mx
import numpy as np
from mxnet.test_utils import *
def test_op_roi_align():
# Adapted from https://github.com/wkcn/MobulaOP/blob/master/tests/test_op/test_roi_align_op.py
T = np.float32
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
@wkcn
wkcn / bufferIter.py
Created January 4, 2019 09:55
Buffer Iteration
import mxnet as mx
import threading
try:
import Queue
except:
import queue as Queue
class DataThread(threading.Thread):
def __init__(self, bufferIter):
super(DataThread, self).__init__()
@wkcn
wkcn / test_fcn_for_mxnet.py
Created January 19, 2019 13:07
test_fcn_for_mxnet
import os
# These environment variables must be set BEFORE `import mxnet` below,
# since MXNet reads them at import/initialization time.
os.environ['PYTHONUNBUFFERED'] = '1'  # flush stdout/stderr immediately (useful under nohup/pipes)
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'  # disable cuDNN autotuning for reproducible startup
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'  # disable GPU peer-to-peer transfers
import time
import logging
import argparse
import mxnet as mx
from mxnet import gluon, autograd, nd
import numpy as np
@wkcn
wkcn / test_bn.py
Last active March 29, 2019 06:30
test bn
from __future__ import print_function
import sys
import os
import tempfile
import time
import multiprocessing as mp
import unittest
import random
import mxnet as mx
import numpy as np