Skip to content

Instantly share code, notes, and snippets.

@JuYoungAhn
Created November 6, 2016 16:45
Show Gist options
  • Save JuYoungAhn/9ca2c306ccfa0faffad754a34cf3076e to your computer and use it in GitHub Desktop.
Save JuYoungAhn/9ca2c306ccfa0faffad754a34cf3076e to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"\"\"\"\n",
" Keras Fully Connected Network Tutorial\n",
" http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\n",
"\"\"\"\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense\n",
"import numpy\n",
"\n",
"seed = 7\n",
"numpy.random.seed(seed)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# load pima indians dataset\n",
"# http://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data\n",
"dataset = numpy.loadtxt(\"c:\\workspace\\data\\pima-indians-diabetes.csv\", delimiter=\",\")\n",
"\n",
"# split into input (X) and output (Y) variables\n",
"X = dataset[:,0:8] # 모든 row가져오고 0부터 7까지의의 column가져옴\n",
"Y = dataset[:,8] # 모든 row, 8번열 가져옴"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 6. , 148. , 72. , ..., 0.627, 50. , 1. ],\n",
" [ 1. , 85. , 66. , ..., 0.351, 31. , 0. ],\n",
" [ 8. , 183. , 64. , ..., 0.672, 32. , 1. ],\n",
" ..., \n",
" [ 5. , 121. , 72. , ..., 0.245, 30. , 0. ],\n",
" [ 1. , 126. , 60. , ..., 0.349, 47. , 1. ],\n",
" [ 1. , 93. , 70. , ..., 0.315, 23. , 0. ]])"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dataset"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"' \\n input layer \\xeb\\x8a\\x94 8\\xea\\xb0\\x9c\\xec\\x9d\\x98 layer\\xec\\x9d\\xb8\\xeb\\x8d\\xb0 input layer\\xeb\\x8a\\x94 \\xec\\x83\\x9d\\xeb\\x9e\\xb5\\xed\\x95\\x98\\xea\\xb3\\xa0 \\xea\\xb8\\xb0\\xec\\x88\\xa0\\xed\\x95\\xa8.\\n input layer\\xea\\xb9\\x8c\\xec\\xa7\\x80 \\xed\\x8f\\xac\\xed\\x95\\xa8\\xed\\x95\\x98\\xeb\\xa9\\xb4 \\xec\\x9c\\x84 \\xeb\\x84\\xa4\\xed\\x8a\\xb8\\xec\\x9b\\x8c\\xed\\x81\\xac\\xeb\\x8a\\x94 \\xec\\xb4\\x9d 4\\xea\\xb0\\x9c\\xec\\x9d\\x98 layer\\xeb\\xa1\\x9c \\xec\\x9d\\xb4\\xeb\\xa3\\xa8\\xec\\x96\\xb4\\xec\\xa0\\xb8 \\xec\\x9e\\x88\\xec\\x9d\\x8c.\\n \\n init \\xeb\\xb3\\x80\\xec\\x88\\x98\\xeb\\x8a\\x94 weight\\xec\\x9d\\x98 distribution\\xec\\x9d\\x84 \\xec\\xa0\\x95\\xed\\x95\\xa8 uniform\\xec\\x9d\\x98 \\xea\\xb2\\xbd\\xec\\x9a\\xb0 \\xea\\xb7\\xa0\\xec\\x9d\\xbc \\xeb\\xb6\\x84\\xed\\x8f\\xac\\xeb\\xa1\\x9c weight\\xeb\\xa5\\xbc initialize\\xed\\x95\\xa8\\n \\xeb\\x98\\x90\\xed\\x95\\x9c init\\xeb\\xb3\\x80\\xec\\x88\\x98\\xeb\\x8a\\x94 weight\\xeb\\xa5\\xbc \\xec\\xa7\\x81\\xec\\xa0\\x91\\xec\\xa0\\x81\\xec\\x9c\\xbc\\xeb\\xa1\\x9c \\xeb\\xaa\\x85\\xec\\x8b\\x9c\\xed\\x95\\x98\\xec\\xa7\\x80 \\xec\\x95\\x8a\\xeb\\x8a\\x94 \\xea\\xb2\\xbd\\xec\\x9a\\xb0\\xec\\x97\\x90\\xeb\\xa7\\x8c \\xed\\x98\\xb8\\xec\\xb6\\x9c\\xeb\\x90\\xa8\\n \\xec\\xb6\\x9c\\xec\\xb2\\x98 : http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\\n \\n activation \\xeb\\xb3\\x80\\xec\\x88\\x98\\xeb\\x8a\\x94 \\xed\\x95\\xb4\\xeb\\x8b\\xb9 layer\\xec\\x9d\\x98 activation function\\xec\\x9d\\x84 \\xeb\\xaa\\x85\\xec\\x8b\\x9c\\xed\\x95\\xa8\\n output layer\\xeb\\x8a\\x94 sigmoid function\\xec\\x9d\\xb4\\xea\\xb3\\xa0 \\xeb\\x82\\x98\\xeb\\xa8\\xb8\\xec\\xa7\\x80\\xeb\\x8a\\x94 relu function\\xec\\x9e\\x84\\n'"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# create model\n",
"model = Sequential()\n",
"model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))\n",
"model.add(Dense(8, init='uniform', activation='relu'))\n",
"model.add(Dense(1, init='uniform', activation='sigmoid'))\n",
"\n",
"\"\"\" \n",
" input layer 는 8개의 layer인데 input layer는 생략하고 기술함.\n",
" input layer까지 포함하면 위 네트워크는 총 4개의 layer로 이루어져 있음.\n",
" \n",
" init 변수는 weight의 distribution을 정함 uniform의 경우 균일 분포로 weight를 initialize함\n",
" 또한 init변수는 weight를 직접적으로 명시하지 않는 경우에만 호출됨\n",
" 출처 : http://machinelearningmastery.com/tutorial-first-neural-network-python-keras/\n",
" \n",
" activation 변수는 해당 layer의 activation function을 명시함\n",
" output layer는 sigmoid function이고 나머지는 relu function임\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"'\\n adam optimizer\\xeb\\x8a\\x94 stochastic gradient descent \\xec\\x95\\x8c\\xea\\xb3\\xa0\\xeb\\xa6\\xac\\xec\\xa6\\x98\\xec\\xa4\\x91 \\xed\\x9a\\xa8\\xea\\xb3\\xbc\\xec\\xa0\\x81\\xec\\x9d\\xb4\\xeb\\x9d\\xbc\\xea\\xb3\\xa0 \\xec\\x95\\x8c\\xeb\\xa0\\xa4\\xec\\xa7\\x84 \\xec\\x95\\x8c\\xea\\xb3\\xa0\\xeb\\xa6\\xac\\xec\\xa6\\x98\\n \\xeb\\x98\\x90\\xed\\x95\\x9c \\xec\\x9d\\xb4\\xec\\xa7\\x84\\xeb\\xb6\\x84\\xeb\\xa5\\x98 \\xeb\\xac\\xb8\\xec\\xa0\\x9c\\xec\\x9d\\xb4\\xeb\\xaf\\x80\\xeb\\xa1\\x9c loss fuction\\xec\\x9d\\x80 binary_crossentropy\\xec\\x9d\\xb4\\xeb\\x8b\\xa4.\\n metrics \\xeb\\x8a\\x94 classification accuracy\\xeb\\xa5\\xbc metrics\\xeb\\xa1\\x9c \\xec\\x88\\x98\\xec\\xa7\\x91\\xed\\x95\\x98\\xea\\xb3\\xa0 \\xeb\\xb3\\xb4\\xea\\xb3\\xa0\\xed\\x95\\x98\\xeb\\x8a\\x94 \\xea\\xb2\\x83\\xec\\x9d\\x84 \\xeb\\x9c\\xbb\\xed\\x95\\xa8\\n'"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Compile model\n",
"model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
"\"\"\"\n",
" adam optimizer는 stochastic gradient descent 알고리즘중 효과적이라고 알려진 알고리즘\n",
" 또한 이진분류 문제이므로 loss fuction은 binary_crossentropy이다.\n",
" metrics 는 classification accuracy를 metrics로 수집하고 보고하는 것을 뜻함\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"'\\n training \\xec\\x8b\\x9c\\xed\\x82\\xb4\\n np_epoch\\xeb\\x8a\\x94 epoch\\xeb\\xa5\\xbc \\xeb\\xaa\\x87 \\xeb\\xb2\\x88 \\xeb\\x8f\\x8c\\xec\\xa7\\x80\\xeb\\xa5\\xbc \\xec\\xa7\\x80\\xec\\xa0\\x95\\xed\\x95\\xa8\\n batch_size\\xeb\\x8a\\x94 stochastic gradient descent\\xec\\x97\\x90\\xec\\x84\\x9c \\xeb\\xaa\\x87 \\xeb\\xb2\\x88 \\xeb\\x8f\\x8c\\xea\\xb3\\xa0 gradient\\xeb\\xa5\\xbc update \\xed\\x95\\xa0\\xec\\xa7\\x80\\xeb\\xa5\\xbc \\xeb\\x9c\\xbb\\xed\\x95\\xa8\\n'"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Fit the model\n",
"model.fit(X, Y, nb_epoch=10, batch_size=10, verbose=0)\n",
"\n",
"\"\"\"\n",
" training 시킴\n",
" np_epoch는 epoch를 몇 번 돌지를 지정함\n",
" batch_size는 stochastic gradient descent에서 몇 번 돌고 gradient를 update 할지를 뜻함\n",
" verbose = 0 을 넣으면 막대바가 출력되지 않음\n",
" jupyter notebook에서는 verbose=0을 입력 안하면 에러가 뜸\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"768/768 [==============================] - 0s \n",
"acc: 69.66%\n"
]
},
{
"data": {
"text/plain": [
"'\\n evaluate \\xed\\x95\\xa8\\xec\\x88\\x98\\xeb\\x8a\\x94 training dataset\\xec\\x97\\x90 \\xeb\\x8c\\x80\\xed\\x95\\x9c accuracy\\xeb\\xa5\\xbc \\xec\\xb8\\xa1\\xec\\xa0\\x95\\xed\\x95\\xa8\\n \\xed\\x95\\x98\\xec\\xa7\\x80\\xeb\\xa7\\x8c \\xec\\x9d\\xb4\\xeb\\x8a\\x94 training accuracy\\xeb\\xa7\\x8c\\xec\\x9d\\x84 \\xeb\\x9c\\xbb\\xed\\x95\\xa0\\xeb\\xbf\\x90 test accuracy\\xeb\\x8a\\x94 \\xec\\x95\\x8c \\xec\\x88\\x98 \\xec\\x97\\x86\\xec\\x9d\\x8c\\n'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# evaluate the model\n",
"scores = model.evaluate(X, Y)\n",
"print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n",
"\"\"\"\n",
" evaluate 함수는 training dataset에 대한 accuracy를 측정함\n",
" 하지만 이는 training accuracy만을 뜻할뿐 test accuracy는 알 수 없음\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"768/768 [==============================] - 0s \n",
"acc: 73.83%\n"
]
},
{
"data": {
"text/plain": [
"'\\n epoch\\xeb\\xa5\\xbc \\xeb\\x8d\\x94 \\xeb\\x8f\\x8c\\xeb\\xa0\\xa4\\xeb\\xb3\\xb4\\xeb\\xa9\\xb4?\\n'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Fit the model\n",
"model.fit(X, Y, nb_epoch=30, batch_size=10, verbose=0)\n",
"\n",
"# evaluate the model\n",
"scores = model.evaluate(X, Y)\n",
"print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n",
"\"\"\"\n",
" epoch를 더 돌려보면?\n",
" accuracy가 증가하는 것을 알 수 있음.\n",
" 아직 underfitting이라는 증거임\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n"
]
}
],
"source": [
"predictions = model.predict(X)\n",
"# round predictions\n",
"# output 의 값이 0.5이상이면 1로 0.5미만이면 0으로 예측함\n",
"rounded = [round(x) for x in predictions]\n",
"print(rounded)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"accurary : 0.73828125\n"
]
}
],
"source": [
"# Accuracy를 수동으로 구해보자. Keras가 맞는지 확인해보자.\n",
"\n",
"count = 0\n",
"for i in range(Y.size) :\n",
" if(Y[i] == rounded[i]) :\n",
" count += 1\n",
"\n",
"print 'accurary : ', count/float(Y.size)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python [Root]",
"language": "python",
"name": "Python [Root]"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.12"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment