- Intel i7-6850K @ 3.60GHz
- GTX 1070
- Fresh install of Ubuntu 16.04
Note: the home directory and the xda drive were not encrypted during install.
// FROM: https://releases.llvm.org/2.2/docs/tutorial/JITTutorial1.html
#include <llvm/IR/Module.h> | |
#include <llvm/IR/Function.h> | |
#include <llvm/IR/CallingConv.h> | |
#include "llvm/IR/IRBuilder.h" | |
#include "llvm/IR/BasicBlock.h" | |
int main(int argc, char **argv) |
#!/bin/bash
# Bootstrap script: pins component versions for a GStreamer/WebRTC stack,
# then builds and installs libzmq from source.
# NOTE(review): the trailing " | |" paste artifacts were removed from every
# line — under bash they parse as empty pipeline commands (syntax errors).

# Limit parallel make to 3 jobs.
export MAKEFLAGS="-j 3"
# Abort immediately if any command fails.
set -e

# Component versions (feature-driven minimums noted per line).
VERSION="1.18.0"
LIBNICE_VERSION="0.1.17"      # libnice (v>=0.1.14) needed for webrtcbin
LIBSRTP_VERSION="2.3.0"       # libsrtp (v>=2.2.0) required for srtp plugin
WEBRTCAUDIO_VERSION="0.3.1"   # webrtc-audio-processing required for webrtcdsp
USRSCTP_VERSION="0.9.3.0"     # usrsctp required for webrtc data channels (sctp)

# Toolchain and headers needed to build libzmq from a git checkout.
sudo apt-get install libtool pkg-config build-essential autoconf automake uuid-dev

# Fetch, build, test, and install libzmq system-wide.
# NOTE(review): original URL was the garbled "git://github.com/..." —
# restored to the upstream GitHub repo over https, since GitHub has disabled
# the unauthenticated git:// protocol.
git clone https://github.com/zeromq/libzmq.git
cd libzmq
./autogen.sh
# do not specify "--with-libsodium" if you prefer to use internal tweetnacl
# security implementation (recommended for development)
./configure --with-libsodium
make check
sudo make install
# Refresh the shared-library cache so the freshly installed libzmq is found.
sudo ldconfig
#include <Servo.h>

// NOTE(review): trailing " | |" paste artifacts removed — they do not
// compile as C++.

// Servo on the drive/power output.
Servo myservo;
// Servo on the steering linkage. Name kept as "stearing" (sic) because the
// rest of the sketch may reference it.
Servo stearing;

// Control state-machine variable; 99 appears to be an initial/idle
// sentinel — TODO confirm against the rest of the sketch.
int state = 99;

// Ultrasonic ranger pins (trigger output on D2, echo input on D4) —
// presumably an HC-SR04-style sensor; verify against the wiring.
const int trigPin = 2;
const int echoPin = 4;
// for power servo
/*
  Analog Input

  Demonstrates analog input by reading an analog sensor on analog pin 0 and
  turning on and off a light-emitting diode (LED) connected to digital pin 13.
  The amount of time the LED will be on and off depends on the value obtained
  by analogRead().

  The circuit:
  * Potentiometer attached to analog input 0
  * center pin of the potentiometer to the analog pin
### Preprocess the data here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten | |
def LeNet(x): | |
# Hyperparameters | |
mu = 0 | |
sigma = 0.1 | |
## VGG16 model for Keras

This is the Keras model of the 16-layer network used by the VGG team in the
ILSVRC-2014 competition. It has been obtained by directly converting the Caffe
model provided by the authors. Details about the network architecture can be
found in the following arXiv paper:

"Very Deep Convolutional Networks for Large-Scale Image Recognition",
K. Simonyan and A. Zisserman
# AlexNet with batch normalization in Keras; input image is 224x224.
# NOTE(review): trailing " | |" paste artifacts removed — they are Python
# syntax errors. The snippet uses the legacy (pre-1.0) Keras API:
# Convolution2D(nb_filter, stack_size, nb_row, nb_col) positional arguments
# and the border_mode= keyword, neither of which exists in modern Keras.
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization

model = Sequential()
# First conv layer: 64 filters over a 3-channel input with 11x11 kernels,
# "full" border mode (output larger than input) — presumably the first
# AlexNet conv block; the remaining layers are outside this chunk.
model.add(Convolution2D(64, 3, 11, 11, border_mode='full'))