//
//  onehiddenlayerperceptron.cu
//  onehiddenlayerperceptron
//
//  Created by Sergei Bugrov on 8/21/17.
//  Copyright © 2017 Sergei Bugrov. All rights reserved.
//
#include <stdio.h>

# by MBT on StackOverflow
import numpy as np

# D_in is input dimension;
# H is hidden dimension;
# D_out is output dimension.
Batch_Size, D_in, H, D_out = 12, 1000, 100, 10

# Create random input and output data
x = np.random.randn(Batch_Size, D_in)

# by MBT on StackOverflow
import numpy as np

# D_in is input dimension;
# H is hidden dimension;
# D_out is output dimension.
Batch_Size, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = np.random.randn(Batch_Size, D_in)
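
Both NumPy snippets above stop right after creating x. For completeness, here is a minimal sketch of how the well-known two-layer-network NumPy example they come from typically continues; the target y, the weight initialization, and the training-loop constants below are assumptions based on that example, not part of the original snippets.

# Sketch (assumption): continue the classic two-layer NumPy example.
y = np.random.randn(Batch_Size, D_out)   # random target data

# Randomly initialize the input->hidden and hidden->output weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: hidden layer with ReLU, then linear output
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)
    y_pred = h_relu.dot(w2)

    # Squared-error loss
    loss = np.square(y_pred - y).sum()

    # Backward pass: gradients of the loss w.r.t. w2 and w1
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h = grad_y_pred.dot(w2.T)
    grad_h[h < 0] = 0                    # ReLU gradient
    grad_w1 = x.T.dot(grad_h)

    # Gradient-descent update
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2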

# pytorch team
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize (net, criterion, trainloader, optimizer
        # are assumed to be defined earlier in the tutorial this loop is from)
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

# by deepmind
import tensorflow as tf
import sonnet as snt

# set up a 'module'
mlp = snt.Sequential([
    snt.Linear(1024),
    tf.nn.relu,
    snt.Linear(10),
])
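
A short usage sketch for the module above; the batch size and input width are arbitrary choices, not from the original snippet.

# Usage sketch: Sonnet builds the Linear variables lazily on first call,
# so the input width (784 here, arbitrary) is inferred from the batch.
batch = tf.random.normal([8, 784])
logits = mlp(batch)        # Linear(1024) -> relu -> Linear(10)
print(logits.shape)        # (8, 10)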

# Step 1 - import Biopython modules for sequence alignment
from Bio import pairwise2
from Bio.Seq import Seq

# Step 2 - define multiple sequences
seq1 = Seq("ACCGGT")
seq2 = Seq("ACGT")

# Step 3 - align
alignments = pairwise2.align.globalxx(seq1, seq2)
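
A possible follow-up step, not in the original snippet, is to print the best alignment in a readable form using Biopython's format_alignment helper.

# Step 4 (assumed) - pretty-print the first (best-scoring) alignment
from Bio.pairwise2 import format_alignment
print(format_alignment(*alignments[0]))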

import random

greeting_inputs = ("hey", "good morning", "good evening", "morning", "evening", "hi", "whatsup")
greeting_responses = ["hey", "hey hows you?", "*nods*", "hello, how you doing", "hello", "Welcome, I am good and you"]

def generate_greeting_response(greeting):
    # Return a random canned response if any word of the input is a known greeting.
    for token in greeting.split():
        if token.lower() in greeting_inputs:
            return random.choice(greeting_responses)
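
A quick check of the function above (the input string is an arbitrary example); it returns a random canned response when a greeting word is recognized, and None otherwise.

# Example call with an arbitrary greeting
print(generate_greeting_response("hi there"))   # e.g. "hello, how you doing"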

# (From the official "Reformer" repository)
def hash_vectors(self, vecs, rng):
    # If we factorize the hash, find a factor dividing n_buckets nicely.
    rot_size, factor_list = self.n_buckets, [self.n_buckets]
    if self._factorize_hash:
        # If we are given a list of factors, verify it and use later.
        if isinstance(self._factorize_hash, list):
            rot_size, product = 0, 1
            factor_list = self._factorize_hash
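
The hash_vectors method is cut off above. As a rough illustration of the idea it implements (angular LSH via random rotations, as described in the Reformer paper), here is a minimal NumPy sketch; it is not the repository's implementation, and the function name, the single non-factorized hash, and the example shapes are assumptions.

import numpy as np

def lsh_hash_sketch(vecs, n_buckets, rng=np.random.default_rng(0)):
    # Sketch (assumption): one random rotation, no factorized hash,
    # no multi-round hashing.
    d = vecs.shape[-1]
    rotations = rng.normal(size=(d, n_buckets // 2))
    rotated = vecs @ rotations
    # Concatenating with the negation lets argmax pick one of n_buckets
    # angular "slices" around the rotated directions.
    rotated = np.concatenate([rotated, -rotated], axis=-1)
    return np.argmax(rotated, axis=-1)   # bucket id per vector

# Arbitrary example: hash 5 random 16-dimensional vectors into 8 buckets.
print(lsh_hash_sketch(np.random.randn(5, 16), n_buckets=8))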