This tutorial will guide you through the setup of the HTC Vive Tracker in Python 3.6 on Ubuntu 14.04.
Up-to-date graphics drivers
x86 architecture
SteamVR requires more than 4 GB of free disk space
#pragma once | |
#include <cmath> | |
#include <memory> | |
#include <vector> | |
#include <cassert> | |
namespace sp { | |
/** |
# A simple limiter | |
from sounddevice import Stream, CallbackStop | |
from time import sleep | |
from numpy import array, random, zeros | |
import matplotlib.pyplot as plt | |
################################### Constants ################################## | |
fs = 44100 # Hz |
/*
Super Easy DIY Drum Pad Example
GIT: https://gist.github.com/billju/ce1337ea3c1dbb4341ce22dca1b55442
2017 by Billju
Inspired by Evan Kale
*/
#include <Keyboard.h> | |
#include "MIDIUSB.h" |
#!/bin/bash
# Sets each CUDA device to persistence mode and sets the application clock
# and power limit to the device's maximum supported values.
# When run with "--dry-run" as the first command-line argument, or when not
# run as superuser, it displays the commands; otherwise it executes them.
#
# Hint: To run this at boot time, place this script in /root and create a file
# /etc/cron.d/nvidia_boost with the following single line:
# @reboot root /root/nvidia_boost.sh >/dev/null
#
# How to apply exponential moving average decay for variables? | |
# https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/2 | |
class EMA(nn.Module):
    """Exponential moving average, packaged as an nn.Module.

    Each call blends a new observation into a running average:
    ``mu * x + (1 - mu) * last_average``.
    """

    def __init__(self, mu):
        """mu: weight given to the newest observation."""
        super().__init__()
        self.mu = mu

    def forward(self, x, last_average):
        """Return the moving average updated with observation ``x``."""
        decay = 1 - self.mu
        return self.mu * x + decay * last_average
''' | |
Memory profiling utilities | |
''' | |
import gc | |
import inspect | |
import linecache | |
import os.path | |
import sys | |
import time | |
import threading |
// An example of using the PyTorch C++ API to implement a custom forward and backward function | |
#include <iostream> | |
#include <vector> | |
#include <torch/torch.h> | |
#include <torch/csrc/autograd/variable.h> | |
#include <torch/csrc/autograd/function.h> | |
#include <torch/csrc/autograd/VariableTypeUtils.h> | |
#include <torch/csrc/autograd/functions/utils.h> |
import torch | |
import torch.nn.functional as F | |
def maml_grad(model, inputs, outputs, lr, batch=1): | |
""" | |
Update a model's gradient using MAML. | |
The gradient will point in the direction that | |
improves the total loss across all inner-loop |