Skip to content

Instantly share code, notes, and snippets.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import begin
# Layer sizes for a simple 3-layer feed-forward MNIST classifier.
l1_nodes = 200          # first hidden layer width
l2_nodes = 100          # second hidden layer width
final_layer_nodes = 10  # output layer: one unit per digit class (0-9)
# define placeholder for data
# also considered as the "visible layer, the layer that we see"

# Initialize all graph variables and open a TF1-style session.
# NOTE(review): `mnist`, `X`, and `Y_` are not defined in this fragment;
# presumably created earlier in the original gist — confirm.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Mini-batch training loop: 1000 steps, 100 examples per step.
# (The scrape had flattened the loop body to column 0, which is a
# Python syntax error; indentation restored here.)
for i in range(1000):
    batch_X, batch_y = mnist.train.next_batch(100)
    # associate data with placeholders
    train_data = {X: batch_X, Y_: batch_y}
    # train model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import begin
# Layer sizes for a simple feed-forward MNIST network.
l1_nodes = 200  # first hidden layer width
l2_nodes = 100  # second hidden layer width
final_layer_nodes = 10  # output layer: one unit per digit class (0-9)
# define placeholder for data
# also considered as the "visible layer, the layer that we see"
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
import numpy as np
# Layer sizes for the Keras version of the same MNIST network.
l1_nodes = 200  # first hidden layer width
l2_nodes = 100  # second hidden layer width
final_layer_nodes = 10  # output layer: one unit per digit class (0-9)
# Setup for a simulated-regression animation: modelling helpers (modelr,
# dplyr), plotting (ggplot2), and animation backends (animation, gganimate).
library(modelr)
library(dplyr)
library(ggplot2)
library(animation)
library(gganimate)
options(scipen = 999)  # suppress scientific notation in printed output
n <- 200 # number of observations
bias <- 4  # intercept term for the simulated data
@conormm
conormm / torch_test_1.py
Created February 28, 2018 21:59
Torch regression example on boston housing dataset.
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
@conormm
conormm / torch_regression_example.py
Last active August 3, 2022 20:04
Torch regression example - data loading and simple feed forward network.
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
# Binary cross-entropy (log loss) for a single 0/1 label.
#
# label: the true class, 0 or 1 (scalar)
# p:     estimated probability (or vector of probabilities) of class 1
#
# Returns -log(p) when label is 1, otherwise -log(1 - p); vectorised
# over `p` because -log() is elementwise.
logloss <- function(label, p) {
  if (label == 1) -log(p) else -log(1 - p)
}
library(tidyverse)
# create the full range of estimated probabilities: 0 to 1 in steps of 0.001
probs <- seq(0, 1, 0.001)
# apply log loss over the full range of probabilities,
# once per possible true label
t1 <- logloss(1, probs)  # loss curve when the true label is 1
t0 <- logloss(0, probs)  # loss curve when the true label is 0
# tidy the data (NOTE(review): the `df` plotted below is never constructed
# in this fragment — presumably built from probs/t1/t0; confirm in the gist)
df %>%
ggplot(aes(probs, logloss, colour = label)) +
geom_line(size = .8) +
labs(
title = "Log Loss over estimated probability",
x = "Estimated probabilty",
y = "Log Loss",
colour = "Label"
) +
annotate("text", label = "Log Loss penalises overly confident,