Martin Chang marty1885

πŸ‘¨β€πŸ’»
Writing code
View GitHub Profile
#include <xtensor/xarray.hpp>       // xt::xarray, the dynamic n-d array container
#include <xtensor/xrandom.hpp>      // xt::random::* generators
#include <xtensor/xio.hpp>          // operator<< for printing expressions
#include <xtensor/xindexview.hpp>   // xt::filter and other index views
#include <xtensor-blas/xlinalg.hpp> // xt::linalg (BLAS/LAPACK bindings)
#include <iostream>
using namespace std;
void s1()
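
A minimal, self-contained sketch of what these headers provide, assuming nothing about the original body of s1(): random array construction, a filtered index view, and a BLAS-backed matrix product.

#include <xtensor/xarray.hpp>
#include <xtensor/xrandom.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xindexview.hpp>
#include <xtensor-blas/xlinalg.hpp>
#include <iostream>

int main()
{
    // 3x3 matrix of uniform random values in [0, 1)
    xt::xarray<double> a = xt::random::rand<double>({3, 3});

    // Boolean-mask view: every element greater than 0.5
    auto big = xt::filter(a, a > 0.5);

    // Matrix product via the BLAS bindings in xtensor-blas
    xt::xarray<double> aa = xt::linalg::dot(a, a);

    std::cout << a << "\n" << big << "\n" << aa << "\n";
}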
// One ray per work item: each kernel instance casts its own ray against the BVH.
__kernel void raycastStream(__global Ray* rays, __global BVHNode* bvh, __global Triangle* triangles, __global RayHit* hits)
{
    int workID = get_global_id(0);   // index of the ray handled by this work item
    Ray ray = rays[workID];

    // Precompute the sign of the ray direction on each axis; the slab test
    // uses these to pick the near/far bounding-box plane without branching.
    int signs[3];
    signs[0] = ray.direction[0] < 0;
    signs[1] = ray.direction[1] < 0;
    signs[2] = ray.direction[2] < 0;
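
The sign bits computed above are the usual ingredient of the branchless ray/AABB "slab" test used during BVH traversal: with a box stored as a min/max corner pair, the sign of the ray direction selects which corner gives the near and the far plane on each axis. A hedged C++ sketch of that test; the struct layout here is an assumption, not the gist's actual BVHNode.

#include <algorithm>

struct AABB { float bounds[2][3]; };  // bounds[0] = min corner, bounds[1] = max corner

// Slab test: invDir holds 1/direction per axis, signs[i] is 1 when direction[i] < 0.
// Returns true when the ray overlaps the box between tMin and tMax.
bool intersectAABB(const float origin[3], const float invDir[3], const int signs[3],
                   const AABB& box, float tMin, float tMax)
{
    for (int axis = 0; axis < 3; ++axis) {
        // The sign bit picks the near plane first, so no per-axis branching is needed.
        float tNear = (box.bounds[signs[axis]][axis]     - origin[axis]) * invDir[axis];
        float tFar  = (box.bounds[1 - signs[axis]][axis] - origin[axis]) * invDir[axis];
        tMin = std::max(tMin, tNear);
        tMax = std::min(tMax, tFar);
        if (tMax < tMin)
            return false;
    }
    return true;
}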
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Scanner states: operator, number, front (opening) bracket, back (closing) bracket.
#define OP_STATE 0
#define NUM_STATE 1
#define FBRAC_STATE 2
#define BBRAC_STATE 3

// Transition handler: receives the current state (by pointer) and the next input character.
typedef void(*TransFunc)(int*, char);
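
Taking the current state by pointer together with the next character suggests a table-driven scanner: one handler per state, indexed by the constants above. A minimal hedged sketch of how such a table might be wired up; the handlers and input below are illustrative, not the gist's.

#include <stdio.h>

#define OP_STATE 0
#define NUM_STATE 1
#define FBRAC_STATE 2
#define BBRAC_STATE 3

typedef void(*TransFunc)(int*, char);

/* Illustrative handlers: each inspects the character and rewrites the state. */
static void onOp(int* state, char c)
{
    if (c >= '0' && c <= '9') *state = NUM_STATE;
    else if (c == '(')        *state = FBRAC_STATE;
    else if (c == ')')        *state = BBRAC_STATE;
}

static void onNum(int* state, char c)
{
    if (c < '0' || c > '9')
        *state = (c == '(') ? FBRAC_STATE : (c == ')') ? BBRAC_STATE : OP_STATE;
}

int main(void)
{
    /* One transition handler per state, indexed by the state constants. */
    TransFunc table[4] = {onOp, onNum, onOp, onOp};
    const char* input = "1+(23)";
    int state = OP_STATE;
    for (const char* p = input; *p != '\0'; ++p) {
        table[state](&state, *p);
        printf("'%c' -> state %d\n", *p, state);
    }
    return 0;
}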
#include <iostream>
#include <vector>
#include "tiny_dnn/tiny_dnn.h"
using namespace tiny_dnn;
using namespace tiny_dnn::activation;
using namespace tiny_dnn::layers;
int main()
{
#define CNN_USE_AVX // Enable the AVX backend for faster computation
#include "tiny_dnn/tiny_dnn.h"
using namespace tiny_dnn;
using namespace tiny_dnn::activation;
using namespace tiny_dnn::layers;
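
For orientation, a minimal hedged sketch of how a tiny-dnn network is typically assembled and trained with these headers. The layer sizes, optimizer, and XOR data below are assumptions for illustration, not the gist's actual model.

#define CNN_USE_AVX // enable the AVX backend before including tiny-dnn (needs -mavx)
#include "tiny_dnn/tiny_dnn.h"
#include <vector>
#include <iostream>

int main()
{
    using namespace tiny_dnn;

    // Tiny 2-4-1 multilayer perceptron learning XOR as a regression task.
    network<sequential> net;
    net << fully_connected_layer(2, 4) << tanh_layer()
        << fully_connected_layer(4, 1) << tanh_layer();

    std::vector<vec_t> inputs  = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    std::vector<vec_t> targets = {{0}, {1}, {1}, {0}};

    adagrad optimizer;
    net.fit<mse>(optimizer, inputs, targets, 4 /*batch size*/, 1000 /*epochs*/);

    std::cout << net.predict(vec_t{1, 0})[0] << std::endl;
}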
#include "AudioFile.h"
#include <vector>
#include <random>
#include <iostream>
#include <algorithm>
#include <list>
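
These headers point at Adam Stark's single-header AudioFile library plus the usual containers. A minimal hedged sketch of loading a WAV file and adding white noise to it; the file names and the processing step are assumptions.

#include "AudioFile.h"
#include <random>
#include <iostream>

int main()
{
    AudioFile<double> audio;
    if (!audio.load("input.wav")) {          // hypothetical input path
        std::cerr << "Failed to load input.wav\n";
        return 1;
    }

    std::mt19937 rng{std::random_device{}()};
    std::normal_distribution<double> noise(0.0, 0.01);

    // Add a small amount of Gaussian noise to every sample of every channel.
    for (auto& channel : audio.samples)
        for (auto& sample : channel)
            sample += noise(rng);

    audio.save("noisy.wav");                 // hypothetical output path
    std::cout << audio.getSampleRate() << " Hz, "
              << audio.getNumSamplesPerChannel() << " samples per channel\n";
}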
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <hayai/hayai.hpp>
#include <hayai/hayai_main.hpp>
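
hayai drives micro-benchmarks through its BENCHMARK macro, so combined with xtensor this is presumably timing array expressions. A small hedged sketch using the plain ConsoleOutputter entry point rather than hayai_main.hpp; the benchmark body and array sizes are assumptions.

#include <xtensor/xarray.hpp>
#include <xtensor/xrandom.hpp>
#include <hayai/hayai.hpp>

static xt::xarray<float> a = xt::random::rand<float>({256, 256});
static xt::xarray<float> b = xt::random::rand<float>({256, 256});

// 10 runs of 100 iterations each; assigning to an xarray forces the lazy expression to evaluate.
BENCHMARK(XTensor, ElementwiseAdd, 10, 100)
{
    xt::xarray<float> c = a + b;
    (void)c;
}

int main()
{
    hayai::ConsoleOutputter outputter;
    hayai::Benchmarker::AddOutputter(outputter);
    hayai::Benchmarker::RunAllTests();
    return 0;
}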
// Base case: no handler supplied, nothing to dispatch to.
template<typename VType>
void Switch(const VType& val)
{
}

// Single handler: invoked unconditionally (the value is not inspected in this overload).
template<typename VType, typename Func>
void Switch(const VType& val, Func func)
{
    func();
}
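
The two overloads read like the terminal cases of a value-dispatch helper: no handler does nothing, and a lone trailing callable runs as a default. One plausible variadic extension, purely an assumption about the intended design, pairs candidate values with handlers:

#include <iostream>
#include <utility>

// No handlers at all: nothing to do.
template<typename VType>
void Switch(const VType&) {}

// A single trailing callable acts as the default case and runs unconditionally.
template<typename VType, typename Func>
void Switch(const VType&, Func func) { func(); }

// (candidate, handler) pairs: run the handler on a match, otherwise recurse on the rest.
template<typename VType, typename Case, typename Func, typename... Rest>
void Switch(const VType& val, const Case& candidate, Func func, Rest&&... rest)
{
    if (val == candidate)
        func();
    else
        Switch(val, std::forward<Rest>(rest)...);
}

int main()
{
    Switch(2,
           1, []{ std::cout << "one\n"; },
           2, []{ std::cout << "two\n"; },
              []{ std::cout << "default\n"; });  // prints "two"
}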
#include <assimp/Importer.hpp> // C++ importer interface
#include <assimp/scene.h> // Output data structure
#include <assimp/postprocess.h> // Post processing flags
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
using namespace std;
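
A minimal hedged sketch of the usual Assimp loading flow with these headers; the file name and the choice of post-processing flags are assumptions.

#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include <iostream>
#include <vector>

int main()
{
    Assimp::Importer importer;

    // Triangulate everything and generate normals while importing.
    const aiScene* scene = importer.ReadFile("model.obj",
        aiProcess_Triangulate | aiProcess_GenSmoothNormals | aiProcess_JoinIdenticalVertices);

    if (scene == nullptr) {
        std::cerr << importer.GetErrorString() << std::endl;
        return 1;
    }

    // Walk every mesh and copy its vertex positions into a flat float array.
    for (unsigned int m = 0; m < scene->mNumMeshes; ++m) {
        const aiMesh* mesh = scene->mMeshes[m];
        std::vector<float> vertices;
        vertices.reserve(mesh->mNumVertices * 3);
        for (unsigned int v = 0; v < mesh->mNumVertices; ++v) {
            vertices.push_back(mesh->mVertices[v].x);
            vertices.push_back(mesh->mVertices[v].y);
            vertices.push_back(mesh->mVertices[v].z);
        }
        std::cout << "mesh " << m << ": " << mesh->mNumVertices << " vertices\n";
    }
    return 0;
}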
#include <Athena/Athena.hpp>
#include <Athena/XtensorBackend.hpp>
#include <Athena/NNPACKBackend.hpp>
#include "mnist_reader.hpp"
// The xtensor API is used directly because Athena's Tensor implementation is still incomplete.
#include <xtensor/xarray.hpp>
#include <iostream>