Type the following:
brew update
brew install redis
To have launchd start Redis now and restart it at login:
brew services start redis
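To confirm the server is reachable, a quick check from Python, assuming the redis-py client is installed (pip install redis):

import redis

# Connect to the default local instance started by brew services.
r = redis.Redis(host="localhost", port=6379)
print(r.ping())  # True if the server is up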
// An example of using the PyTorch C++ API to implement a custom forward and backward function
#include <iostream>
#include <vector>
#include <torch/torch.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/VariableTypeUtils.h>
#include <torch/csrc/autograd/functions/utils.h>
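The C++ listing above breaks off after its includes. For orientation, a minimal sketch of the same idea in the Python API, using torch.autograd.Function; the Square function and its doubling gradient are illustrative, not taken from the original:

import torch

class Square(torch.autograd.Function):
    # Custom forward pass: y = x * x; stash x for the backward pass.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    # Custom backward pass: dL/dx = grad_output * 2x.
    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return grad_output * 2 * x

x = torch.randn(3, requires_grad=True)
Square.apply(x).sum().backward()
print(x.grad)  # equals 2 * x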
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import requests
import os

def download_all_papers(base_url, save_dir, driver_path):
    driver = webdriver.Chrome(driver_path)
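    # (The gist is truncated here. What follows is one plausible continuation,
    # not the original code; the tag selector and the .pdf heuristic are assumptions.)
    driver.get(base_url)
    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, 'a')))
    os.makedirs(save_dir, exist_ok=True)
    for link in driver.find_elements(By.TAG_NAME, 'a'):
        href = link.get_attribute('href')
        if href and href.endswith('.pdf'):
            with open(os.path.join(save_dir, href.rsplit('/', 1)[-1]), 'wb') as f:
                f.write(requests.get(href).content)
    driver.quit()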
# set a proxy
set HTTP_PROXY=
set HTTPS_PROXY=%HTTP_PROXY%
npm config set proxy %HTTP_PROXY%
npm config set https-proxy %HTTPS_PROXY%
git config --global http.proxy %HTTP_PROXY%
git config --global https.proxy %HTTPS_PROXY%
# unset proxy
set HTTP_PROXY=
set HTTPS_PROXY=
npm config delete proxy
npm config delete https-proxy
git config --global --unset http.proxy
git config --global --unset https.proxy
{-# LANGUAGE GADTs #-}
{-
The following code is based on experimental code by Aslan Askerov,
itself based on Ramsey and Pfeffer's "Stochastic Lambda Calculus and
Monads of Probability Distributions". The implementation of random n is
from the Audebaud and Paulin-Mohring paper, as is the random walk example.
This gist is used here: http://madsbuch.com/blog/the-probability-monad/
The class hierarchy is as follows:
import matplotlib.pyplot as plt

def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
    '''
    Draw a neural network cartoon using matplotlib.
    :usage:
        >>> fig = plt.figure(figsize=(12, 12))
        >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])
    '''
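    # (The gist breaks off inside the docstring. The body below is a sketch
    # consistent with the signature, not the original implementation: neurons
    # are drawn as circles, with edges between adjacent layers.)
    v_spacing = (top - bottom) / float(max(layer_sizes))
    h_spacing = (right - left) / float(len(layer_sizes) - 1)
    # Nodes: one column of circles per layer, centered vertically.
    for n, layer_size in enumerate(layer_sizes):
        layer_top = v_spacing * (layer_size - 1) / 2. + (top + bottom) / 2.
        for m in range(layer_size):
            ax.add_artist(plt.Circle((n * h_spacing + left, layer_top - m * v_spacing),
                                     v_spacing / 4., color='w', ec='k', zorder=4))
    # Edges: fully connect each layer to the next.
    for n, (size_a, size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        top_a = v_spacing * (size_a - 1) / 2. + (top + bottom) / 2.
        top_b = v_spacing * (size_b - 1) / 2. + (top + bottom) / 2.
        for m in range(size_a):
            for o in range(size_b):
                ax.add_line(plt.Line2D([n * h_spacing + left, (n + 1) * h_spacing + left],
                                       [top_a - m * v_spacing, top_b - o * v_spacing], c='k'))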
loss = tf.reduce_mean(-elbo)
lr = tf.constant(0.001)
train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=slim.get_model_variables())
init_op = tf.global_variables_initializer()  # tf.initialize_all_variables is deprecated
# get data
data = input_data.read_data_sets('/tmp/', one_hot=True).train
BATCH_SIZE = 100
NUM_ITERS = 50000
tau0 = 1.0  # initial temperature
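The snippet uses an elbo and an initial temperature tau0 without showing where they come from. Assuming this is Gumbel-softmax (Concrete) relaxation code, a typical companion sampler looks like the sketch below; the function names are illustrative:

def sample_gumbel(shape, eps=1e-20):
    # Gumbel(0, 1) noise via the inverse-CDF trick: -log(-log(U)).
    u = tf.random_uniform(shape, minval=0, maxval=1)
    return -tf.log(-tf.log(u + eps) + eps)

def gumbel_softmax_sample(logits, temperature):
    # Perturb logits with Gumbel noise, then soften with a tempered softmax.
    return tf.nn.softmax((logits + sample_gumbel(tf.shape(logits))) / temperature)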
package thunder.streaming

import org.apache.spark.{SparkConf, Logging}
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.mllib.clustering.KMeansModel
import scala.util.Random.nextDouble
from keras.layers import GRU, initializations, K
from collections import OrderedDict

class GRULN(GRU):
    '''Gated Recurrent Unit with Layer Normalization.
    The current implementation only works with consume_less='gpu', which is
    already set.
    # Arguments
        output_dim: dimension of the internal projections and the final output.
    '''
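Assuming the class body is completed as in the original gist (Keras 1.x, where GRU takes output_dim), building a model with the layer might look like this; the shapes are hypothetical:

from keras.models import Sequential

model = Sequential()
# 20 timesteps of 32 features into a 64-unit layer-normalized GRU.
model.add(GRULN(output_dim=64, input_shape=(20, 32)))
model.compile(optimizer='adam', loss='mse')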