library(mxnet)  # deep learning framework used for the network below
library(Hmisc)  # provides hist.data.frame() for multi-panel histograms

# Quick exploratory plots of the full data set
hist.data.frame(full)  # histogram of each variable
pairs(full)            # pairwise scatter plots
# Define the network: one hidden layer of 7 units with ReLU activation,
# then 2 output units fed into a softmax.
data <- mx.symbol.Variable('data')
fc1  <- mx.symbol.FullyConnected(data = data, num_hidden = 7)
act1 <- mx.symbol.Activation(data = fc1, act_type = "relu")  # ReLU activation
fc2  <- mx.symbol.FullyConnected(data = act1, num_hidden = 2)
net  <- mx.symbol.SoftmaxOutput(data = fc2)
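# Optional sanity check (sketch, not required for training): graph.viz()
# from the mxnet R package draws the symbol graph, which makes it easy to
# confirm the layer wiring defined above.
graph.viz(net)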
device <- mx.cpu()  # train on the CPU; swap in mx.gpu() if a GPU is available
model <- mx.model.FeedForward.create(
  X = as.matrix(train.x),
  y = as.numeric(train.y) - 1,           # labels must be 0-based
  ctx = device,                          # cpu or gpu (graphics processing unit) context
  symbol = net,                          # the network structure defined above
  eval.metric = mx.metric.accuracy,
  num.round = 100,                       # number of passes (epochs) over the training data
  learning.rate = 0.01,                  # 0.01 is a good starting point
  momentum = 0.9,                        # exponential moving average of past gradients
  wd = 0.0001,                           # weight decay: L2 regularisation on the weights
  #initializer = mx.init.normal(1/sqrt(dim(train)[1])), # scale the standard deviation with the
  #  number of observations to prevent slow learning if by chance all weights are large or small
  #initializer = mx.init.uniform(0.1),   # alternative: weights drawn uniformly from [-0.1, 0.1]
  array.batch.size = 100,                # mini-batch size
  #epoch.end.callback = mx.callback.save.checkpoint("titanic"),
  #batch.end.callback = mx.callback.log.train.metric(100),
  array.layout = "rowmajor"              # each row of X is one observation
)
mx.model.save(model, 'titanic100', 100)  # checkpoint the model after 100 rounds
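# model3 is used below but never defined in this gist. A plausible
# reconstruction (assumption, sketch only): reload the checkpoint saved
# above so training can be resumed with a smaller learning rate.
model3 <- mx.model.load('titanic100', 100)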
# Fine-tune: continue training from model3's weights for 1500 more rounds
# at a much smaller learning rate.
model4 <- mx.model.FeedForward.create(
  symbol = model3$symbol,
  X = as.matrix(train.x),
  y = as.numeric(train.y) - 1,           # labels must be 0-based
  # eval.data = list("data" = g_test, "label" = y_test),
  ctx = device,                          # cpu or gpu (graphics processing unit) context
  eval.metric = mx.metric.accuracy,
  num.round = 1500,                      # number of passes (epochs) over the training data
  learning.rate = 0.0005,                # lowered from 0.01 for fine-tuning
  momentum = 0.9,                        # exponential moving average of past gradients
  wd = 0.0001,                           # weight decay: L2 regularisation on the weights
  array.batch.size = 100,                # mini-batch size
  arg.params = model3$arg.params,        # warm-start from the previous model's weights
  aux.params = model3$aux.params,
  verbose = FALSE,
  #epoch.end.callback = mx.callback.save.checkpoint("titanic"),
  #batch.end.callback = mx.callback.log.train.metric(100),
  array.layout = "rowmajor"              # each row of X is one observation
)
mx.model.save(model4, 'titanic31000', 1500)  # save the fine-tuned model
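# Usage sketch: score new data with the fine-tuned model. test.x is a
# hypothetical feature matrix, assumed to be prepared the same way as train.x.
preds <- predict(model4, as.matrix(test.x), array.layout = "rowmajor")
pred.label <- max.col(t(preds)) - 1  # columns of preds are per-observation class probabilities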