@Orbifold
Created September 1, 2018 05:49
Gluon hello world

A minimal MXNet Gluon example: a two-layer network (a 16-unit ReLU hidden layer and a linear output layer) is fitted to a single input/target pair with the Adam optimizer.
	import mxnet as mx
	import numpy as np
	from mxnet import gluon

	# One training sample with five input features and five continuous target values.
	x_input = mx.nd.array([[1, 2, 3, 4, 5]], ctx=mx.cpu(), dtype=np.float32)
	y_input = mx.nd.array([[10, 15, 20, 22.5, 25]], ctx=mx.cpu(), dtype=np.float32)


	# Two-layer feed-forward network: a 16-unit ReLU hidden layer followed by
	# a linear output layer with one unit per target value.
	net = gluon.nn.Sequential()
	with net.name_scope():
	    net.add(gluon.nn.Dense(16, activation="relu"))
	    net.add(gluon.nn.Dense(y_input.shape[1]))

	# Initialize the weights on the CPU.
	net.collect_params().initialize(mx.init.Normal(), ctx=mx.cpu())


	# The targets are continuous values rather than class labels, so a regression (L2) loss is used.
	loss_fn = gluon.loss.L2Loss()
	trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .1})


	n_epochs = 10

	for e in range(n_epochs):
	    for i in range(x_input.shape[0]):
	        data = x_input[i:i + 1]       # slicing keeps the batch dimension: shape (1, 5)
	        target = y_input[i:i + 1]
	        with mx.autograd.record():    # record the forward pass for autograd
	            output = net(data)
	            loss = loss_fn(output, target)
	        loss.backward()               # compute gradients outside the record scope
	        trainer.step(data.shape[0])   # update parameters, normalized by the batch size
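
After the loop, a quick way to see what the network has learned is to run it on the training input and compare the prediction with the target. This is a minimal sketch using only the variables defined above; the exact numbers depend on the random initialization and the small number of epochs.

	# Sanity check: the prediction should move toward y_input as training progresses.
	prediction = net(x_input)
	print("predicted:", prediction.asnumpy())
	print("target:   ", y_input.asnumpy())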