
| """ | |
| Code for training RBMs with contrastive divergence. Tries to be as | |
| quick and memory-efficient as possible while utilizing only pure Python | |
| and NumPy. | |
| """ | |
# Copyright (c) 2009, David Warde-Farley
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
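The docstring promises contrastive-divergence training in pure Python and NumPy. For orientation, here is a minimal sketch of a single CD-1 update in that style; it follows the textbook algorithm, not this module's actual implementation, and the function name, learning rate, and sampling details are my own choices:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_update(v0, W, b_vis, b_hid, lr=0.1):
    # One CD-1 step on a batch of binary visible vectors v0; updates
    # the weights and biases in place.
    # Positive phase: hidden units driven by the data.
    h0_prob = sigmoid(v0.dot(W) + b_hid)
    h0 = (np.random.rand(*h0_prob.shape) < h0_prob).astype(v0.dtype)
    # Negative phase: one Gibbs step back to a reconstruction.
    v1_prob = sigmoid(h0.dot(W.T) + b_vis)
    h1_prob = sigmoid(v1_prob.dot(W) + b_hid)
    # CD-1 gradient estimate: data statistics minus model statistics.
    n = v0.shape[0]
    W += lr * (v0.T.dot(h0_prob) - v1_prob.T.dot(h1_prob)) / n
    b_vis += lr * (v0 - v1_prob).mean(axis=0)
    b_hid += lr * (h0_prob - h1_prob).mean(axis=0)
    return ((v0 - v1_prob) ** 2).mean()  # reconstruction error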
import tflearn

# Network building
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net)
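The snippet stops right after wrapping the graph in a DNN trainer. A plausible continuation, assuming trainX holds padded integer sequences of length 100 and trainY the matching two-class one-hot labels (neither is defined above; both names are placeholders):

model.fit(trainX, trainY, n_epoch=10, validation_set=0.1,
          show_metric=True, batch_size=32)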
This is just a quick list of resources on TDA that I put together for @rickasaurus after he asked on Twitter for links to papers, books, etc., and it is by no means an exhaustive list.
Both Carlsson's and Ghrist's survey papers offer a very good introduction to the subject. Another staple of the field is the Mapper algorithm.
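Since Mapper comes up so often, here is a minimal sketch of the idea for readers who want something runnable before diving into the papers: cover the range of a filter function with overlapping intervals, cluster the points in each preimage, and connect clusters that share points. Every detail below (the naive threshold clustering, the parameter choices, the function names) is a toy simplification of mine, not the published algorithm or any library's API.

import numpy as np

def _threshold_clusters(idx, points, eps):
    # Naive single-linkage clustering: connected components of the graph
    # that joins points closer than eps.
    unvisited, clusters = set(idx), []
    while unvisited:
        seed = unvisited.pop()
        comp, frontier = {seed}, [seed]
        while frontier:
            p = frontier.pop()
            near = [q for q in unvisited
                    if np.linalg.norm(points[p] - points[q]) < eps]
            for q in near:
                unvisited.discard(q)
                comp.add(q)
                frontier.append(q)
        clusters.append(comp)
    return clusters

def mapper_1d(points, filt, n_intervals=8, gain=0.3, eps=0.4):
    # Overlapping interval cover of the filter range, clustering within
    # each preimage, then an edge wherever two clusters share a point.
    lo, hi = filt.min(), filt.max()
    step = (hi - lo) / n_intervals
    nodes = []
    for i in range(n_intervals):
        a = lo + i * step - gain * step
        b = lo + (i + 1) * step + gain * step
        pre = np.where((filt >= a) & (filt <= b))[0]
        if len(pre):
            nodes.extend(_threshold_clusters(pre, points, eps))
    edges = {(j, i) for i, u in enumerate(nodes)
             for j, v in enumerate(nodes[:i]) if u & v}
    return nodes, edges

# Points on a noisy circle; the resulting Mapper graph should be a loop.
theta = np.random.uniform(0, 2 * np.pi, 300)
pts = np.c_[np.cos(theta), np.sin(theta)] + 0.05 * np.random.randn(300, 2)
nodes, edges = mapper_1d(pts, pts[:, 0])  # filter = x-coordinate
print(len(nodes), "nodes,", len(edges), "edges")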
# Imports for subclassing the (Keras 1.x/2.0-era) Recurrent base class
# to build a custom recurrent layer:
from keras.layers import Recurrent
import keras.backend as K
from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine import Layer
from keras.engine import InputSpec
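These imports are the usual preamble for writing a custom recurrent layer against the pre-2.1 Keras Recurrent base class. As a sketch of what such a subclass can look like under that API, here is a bare vanilla RNN of my own devising (no biases, dropout, masking, or stateful support), not code from the original source:

class MinimalRNN(Recurrent):
    # h_t = tanh(x_t . kernel + h_{t-1} . recurrent_kernel)

    def __init__(self, units, **kwargs):
        super(MinimalRNN, self).__init__(**kwargs)
        self.units = units
        self.state_spec = InputSpec(shape=(None, units))

    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=(None, None, input_shape[2]))]
        self.states = [None]  # a single state tensor: the previous output
        self.kernel = self.add_weight(
            shape=(input_shape[2], self.units),
            initializer=initializers.get('glorot_uniform'), name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer=initializers.get('orthogonal'),
            name='recurrent_kernel')
        self.built = True

    def step(self, inputs, states):
        output = activations.get('tanh')(
            K.dot(inputs, self.kernel) +
            K.dot(states[0], self.recurrent_kernel))
        return output, [output]

Used like any other layer, e.g. MinimalRNN(32) applied to a (batch, timesteps, features) input; the base class handles the scan over timesteps.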
import numpy as np
import pandas as pd
from random import random

# A triangle wave repeated 100 times; column "b" is the same wave
# shifted (lagged) by 9 steps, so past values of "a" predict "b".
flow = (list(range(1, 10, 1)) + list(range(10, 1, -1))) * 100
pdata = pd.DataFrame({"a": flow, "b": flow})
pdata.b = pdata.b.shift(9)
# Drop the NaN rows introduced by the shift and scale the whole frame
# by one random factor (the original comment called this "some noise",
# though it is a uniform rescaling rather than added noise).
data = pdata.iloc[10:] * random()
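A typical next step for feeding this frame to a recurrent model is to slice it into fixed-length windows shaped (samples, timesteps, features). The helper below, make_windows, is hypothetical and not part of the original snippet:

def make_windows(series, n_steps=10):
    # Hypothetical helper: pair each window of n_steps consecutive
    # values with the value that immediately follows it.
    X = np.array([series[i:i + n_steps]
                  for i in range(len(series) - n_steps)])
    y = series[n_steps:]
    return X[..., None], y  # shapes: (samples, n_steps, 1) and (samples,)

X, y = make_windows(data["a"].values)
print(X.shape, y.shape)  # (1780, 10, 1) (1780,)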