khf2.ipynb
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"name": "khf2.ipynb",
"provenance": [],
"authorship_tag": "ABX9TyM9f89fSYzHCwUdMK7BmoFN",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
}
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/mattherbert1/854fe25b74ecdd674ab1acaa7b117cf9/khf2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Mt4XrJmh8WXZ"
},
"source": [
"Herbert Máté - LNHG6Y\n", | |
"Usage: comment out the methods in the code which is not used in the particular scenario" | |
]
},
{
"cell_type": "code",
"metadata": {
"id": "GAWeFmC1sFjM"
},
"source": [
"import numpy as np\n",
"from sklearn import preprocessing\n",
"import copy"
],
"execution_count": 9,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "-6bClFPKscUF"
},
"source": [
"def activation(x):\n",
"    return 1 / (1 + np.exp(-x))"
],
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "TvSEAd5WtaBG"
},
"source": [
"def dactivation(x):\n",
"    return np.exp(-x)/((1+np.exp(-x))**2)"
],
"execution_count": 3,
"outputs": []
},
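{
"cell_type": "code",
"metadata": {},
"source": [
"# Optional sanity check (not part of the original assignment): compare the\n",
"# analytic derivative dactivation(x) against a central finite-difference\n",
"# approximation of activation(x). The test points and the step size h are\n",
"# arbitrary choices made only for this illustration.\n",
"xs = np.linspace(-3, 3, 7)\n",
"h = 1e-6\n",
"fd = (activation(xs + h) - activation(xs - h)) / (2 * h)  # numerical derivative\n",
"print(np.max(np.abs(fd - dactivation(xs))))  # expected to be close to 0"
],
"execution_count": null,
"outputs": []
},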
{
"cell_type": "code",
"metadata": {
"id": "yInbWKD6tce8"
},
"source": [
"# Creating the MLP class.\n",
"class MLP:\n",
"\n",
"    # Initialize the network based on the layer sizes given as arguments.\n",
"    def __init__(self, *args):\n",
"        # set the random seed\n",
"        np.random.seed(123)\n",
"        # The shape of the network (number of layers), which equals the number of arguments\n",
"        self.shape = args\n",
"        n = len(args)\n",
"        # Create the layers\n",
"        self.layers = []\n",
"        # Create the input layer (+1 unit for the BIAS)\n",
"        self.layers.append(np.ones(self.shape[0]+1))\n",
"        # Create the hidden layer(s) and the output layer\n",
"        for i in range(1,n):\n",
"            self.layers.append(np.ones(self.shape[i]))\n",
"        # Create the weight matrices\n",
"        self.weights = []\n",
"        for i in range(n-1):\n",
"            self.weights.append(np.zeros((self.layers[i].size,\n",
"                                          self.layers[i+1].size)))\n",
"        # dw will store the most recent weight updates (used later, e.g., by the momentum method)\n",
"        self.dw = [0,]*len(self.weights)\n",
"        # Reinitialize the weights\n",
"        self.reset()\n",
"\n",
"    # Define the weight reinitialization function\n",
"    def reset(self):\n",
"        for i in range(len(self.weights)):\n",
"            # random numbers in the range [0,1)\n",
"            Z = np.random.random((self.layers[i].size,self.layers[i+1].size))\n",
"            # rescale the weights to the range -1..1\n",
"            self.weights[i][...] = (2*Z-1)*1\n",
"\n",
"    # Send the input data through the network up to the output layer (forward propagation)\n",
"    def propagate_forward(self, data):\n",
"        # Set the input layer (training data)\n",
"        self.layers[0][0:-1] = data\n",
"        # Propagate the data layer by layer from the input layer to the output layer,\n",
"        # using the sigmoid activation function and matrix multiplications.\n",
"        # In the lecture, the \"layers\" variable was denoted by \"a\".\n",
"        for i in range(1,len(self.shape)):\n",
"            self.layers[i][...] = activation(np.dot(self.layers[i-1],self.weights[i-1]))\n",
"        # Return the output estimated by the network\n",
"        return self.layers[-1]\n",
"\n",
"    # Definition of backpropagation.\n",
"    # The learning rate parameter controls how strongly the network's weights are\n",
"    # adjusted as a function of the gradient. If this value is too high, the network\n",
"    # may \"oscillate\" around a local or global minimum. If the value is too small,\n",
"    # it takes considerably longer to reach the best solution, or the network gets\n",
"    # stuck in a local minimum and never reaches it.\n",
"\n",
"    def propagate_backward(self, target, lrate=0.1):\n",
"        deltas = []\n",
"        # Compute the error on the output layer\n",
"        error = -(target-self.layers[-1]) # -(y - y_hat)\n",
"        # error*dactivation(s(3))\n",
"        delta = np.multiply(error,dactivation(np.dot(self.layers[-2],self.weights[-1])))\n",
"        deltas.append(delta)\n",
"        # Compute the gradient in the hidden layer(s)\n",
"        for i in range(len(self.shape)-2,0,-1):\n",
"            # e.g. last hidden layer: delta(3)*(W(2).T)*dactivation(s(2)) (see lecture)\n",
"            delta=np.dot(deltas[0],self.weights[i].T)*dactivation(np.dot(self.layers[i-1],self.weights[i-1]))\n",
"            deltas.insert(0,delta)\n",
"        # Update the weights\n",
"        for i in range(len(self.weights)):\n",
"            layer = np.atleast_2d(self.layers[i])\n",
"            delta = np.atleast_2d(deltas[i])\n",
"            # e.g. in the last layer: delta(3)*a(2) (see lecture)\n",
"            dw = -lrate*np.dot(layer.T,delta)\n",
"\n",
"            # HF2 start l1reg\n",
"            # weight modification part of l1reg\n",
"            # dw -= self.l1reg(self.weights[i],10**(-5),lrate)\n",
"            # HF2 end l1reg\n",
"\n",
"            # HF2 start l2reg\n",
"            # weight modification part of l2reg\n",
"            # dw -= self.l2reg(self.weights[i],10**(-5),lrate)\n",
"            # HF2 end l2reg\n",
"\n",
"            # HF2 start momentum\n",
"            # weight modification part of momentum\n",
"            # dw += self.momentum(self.dw[i], 0.75)\n",
"            # HF2 end momentum\n",
"\n",
"            # apply the weight update\n",
"            self.weights[i] += dw\n",
"\n",
"            # store the weight update\n",
"            self.dw[i] = dw\n",
"\n",
"        cost = 0 # used to accumulate the extra cost added by the particular procedure (l1reg or l2reg)\n",
"\n",
"        # HF2 start l1reg\n",
"        # cost part of l1reg\n",
"        # cost = self.l1reg_cost(10**(-5))\n",
"        # HF2 end l1reg\n",
"\n",
"        # HF2 start l2reg\n",
"        # cost part of l2reg\n",
"        # cost = self.l2reg_cost(10**(-5))\n",
"        # HF2 end l2reg\n",
"\n",
"        err_ret = (error**2).sum() + cost\n",
"\n",
"        # Return the error\n",
"        return err_ret\n",
"\n",
"    # HF2 start l1reg\n",
"    def l1reg(self, weights, lambda_1=10**(-5), lrate=0.1):\n",
"        return lrate * lambda_1 * np.sign(weights)\n",
"\n",
"    def l1reg_cost(self, lambda_1=10**(-5)):\n",
"        return lambda_1 * sum(np.abs(w).sum() for w in self.weights)\n",
"    # HF2 end l1reg\n",
"\n",
"    # HF2 start l2reg\n",
"    def l2reg(self, weights, lambda_2=10**(-5), lrate=0.1):\n",
"        return lrate * lambda_2 * weights\n",
"\n",
"    def l2reg_cost(self, lambda_2=10**(-5)):\n",
"        return 0.5 * lambda_2 * sum((w**2).sum() for w in self.weights)\n",
"    # HF2 end l2reg\n",
"\n",
"    # HF2 start momentum\n",
"    def momentum(self, dw, alpha=0.75):\n",
"        return alpha * dw\n",
"    # HF2 end momentum"
],
"execution_count": 10,
"outputs": []
},
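{
"cell_type": "code",
"metadata": {},
"source": [
"# Optional sanity check (not part of the original assignment): build a small MLP,\n",
"# run a forward pass and a few forward/backward steps on a single toy sample, and\n",
"# check that the squared error decreases. The 2-3-1 architecture, the sample\n",
"# (x_demo, y_demo) and the number of updates are arbitrary choices for this\n",
"# illustration only.\n",
"tiny = MLP(2, 3, 1)\n",
"x_demo, y_demo = np.array([0.5, -1.0]), np.array([1.0])\n",
"print('output before:', tiny.propagate_forward(x_demo))\n",
"for _ in range(5):\n",
"    tiny.propagate_forward(x_demo)\n",
"    err = tiny.propagate_backward(y_demo, lrate=0.1)\n",
"print('error after 5 updates:', err)\n",
"print('output after:', tiny.propagate_forward(x_demo))"
],
"execution_count": null,
"outputs": []
},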
{
"cell_type": "code",
"metadata": {
"id": "EwFKCIPPuAck"
},
"source": [
"def learn(network, X, Y, valid_split, test_split, epochs=20, lrate=0.1):\n",
"\n",
"    # split the samples into training, validation and test sets\n",
"    nb_samples = len(X)\n",
"    X_train = X[0:int(nb_samples*(1-valid_split-test_split))]\n",
"    Y_train = Y[0:int(nb_samples*(1-valid_split-test_split))]\n",
"    X_valid = X[int(nb_samples*(1-valid_split-test_split)):int(nb_samples*(1-test_split))]\n",
"    Y_valid = Y[int(nb_samples*(1-valid_split-test_split)):int(nb_samples*(1-test_split))]\n",
"    X_test = X[int(nb_samples*(1-test_split)):]\n",
"    Y_test = Y[int(nb_samples*(1-test_split)):]\n",
"\n",
"    # standardization (the scaler is fitted on the training set only)\n",
"    scaler = preprocessing.StandardScaler().fit(X_train)\n",
"    X_train = scaler.transform(X_train)\n",
"    X_valid = scaler.transform(X_valid)\n",
"    X_test = scaler.transform(X_test)\n",
"\n",
"    # shuffle the training inputs and outputs in the same order\n",
"    randperm = np.random.permutation(len(X_train))\n",
"    X_train, Y_train = X_train[randperm], Y_train[randperm]\n",
"\n",
"    # Training phase: we iterate over the training samples epochs times.\n",
"    for i in range(epochs):\n",
"        # This solution goes through the given training data and, for each element,\n",
"        # first propagates it forward through the network and then backpropagates\n",
"        # the deviation from the expected output.\n",
"        # This is called SGD (stochastic gradient descent).\n",
"        train_err = 0\n",
"        for k in range(X_train.shape[0]):\n",
"            network.propagate_forward( X_train[k] )\n",
"            train_err += network.propagate_backward( Y_train[k], lrate )\n",
"        train_err /= X_train.shape[0]\n",
"\n",
"        # validation phase\n",
"        valid_err = 0\n",
"        o_valid = np.zeros(X_valid.shape[0])\n",
"        for k in range(X_valid.shape[0]):\n",
"            o_valid[k] = network.propagate_forward(X_valid[k])\n",
"            valid_err += (o_valid[k]-Y_valid[k])**2\n",
"        valid_err /= X_valid.shape[0]\n",
"\n",
"        print(\"%d epoch, train_err: %.4f, valid_err: %.4f\" % (i, train_err, valid_err))\n",
"\n",
"    # Test phase\n",
"    print(\"\\n--- TESZTELÉS ---\\n\")\n",
"    test_err = 0\n",
"    o_test = np.zeros(X_test.shape[0])\n",
"    for k in range(X_test.shape[0]):\n",
"        o_test[k] = network.propagate_forward(X_test[k])\n",
"        test_err += (o_test[k]-Y_test[k])**2\n",
"        print(k, X_test[k], '%.2f' % o_test[k], ' (elvart eredmeny: %.2f)' % Y_test[k])\n",
"    test_err /= X_test.shape[0]"
],
"execution_count": 11,
"outputs": []
},
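{
"cell_type": "code",
"metadata": {},
"source": [
"# Optional illustration (not part of the original assignment): learn() standardizes\n",
"# the features with a StandardScaler fitted on the training portion only, and with\n",
"# valid_split=0.2 and test_split=0.1 on 1000 samples the slicing above yields\n",
"# 700 training, 200 validation and 100 test samples. The toy matrix below is an\n",
"# arbitrary example showing that the transformed columns have (approximately)\n",
"# zero mean and unit standard deviation.\n",
"demo = np.random.RandomState(0).normal(5.0, 2.0, size=(100, 2))\n",
"demo_scaler = preprocessing.StandardScaler().fit(demo)\n",
"demo_std = demo_scaler.transform(demo)\n",
"print('mean:', demo_std.mean(axis=0), 'std:', demo_std.std(axis=0))"
],
"execution_count": null,
"outputs": []
},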
{
"cell_type": "code",
"metadata": {
"id": "CzhTRhQJuJB1"
},
"source": [
"# Create the artificial neural network with 2 inputs, 10 hidden neurons and 1 output\n",
"network = MLP(2,10,1)"
],
"execution_count": 13,
"outputs": []
},
{
"cell_type": "code",
"metadata": {
"id": "WO7JohWLufT3"
},
"source": [
"# Define the training, validation and test data for the system (noisy XOR data)\n",
"nb_samples=1000\n",
"X = np.zeros((nb_samples,2))\n",
"Y = np.zeros(nb_samples)\n",
"for i in range(0,nb_samples,4):\n",
"    noise = np.random.normal(0,1,8)\n",
"    X[i], Y[i] = (-2+noise[0],-2+noise[1]), 0\n",
"    X[i+1], Y[i+1] = (2+noise[2],-2+noise[3]), 1\n",
"    X[i+2], Y[i+2] = (-2+noise[4],2+noise[5]), 1\n",
"    X[i+3], Y[i+3] = (2+noise[6],2+noise[7]), 0"
],
"execution_count": 7,
"outputs": []
},
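{
"cell_type": "code",
"metadata": {},
"source": [
"# Optional sanity check (not part of the original assignment): the generated set\n",
"# should contain 1000 two-dimensional samples with a balanced 0/1 labelling of the\n",
"# four noisy XOR clusters.\n",
"print(X.shape, Y.shape)            # expected: (1000, 2) (1000,)\n",
"print('class balance:', Y.mean())  # expected: 0.5"
],
"execution_count": null,
"outputs": []
},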
{
"cell_type": "code",
"metadata": {
"id": "oEtfgbdwusN3",
"outputId": "ee251b9e-905a-4e51-ab0c-ad22cd1407fd",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
}
},
"source": [
"# Start the training/testing\n",
"network.reset()\n",
"learn(network, X, Y, 0.2, 0.1)"
],
"execution_count": 12,
"outputs": [
{
"output_type": "stream",
"text": [
"0 epoch, train_err: 0.2575, valid_err: 0.2487\n", | |
"1 epoch, train_err: 0.2515, valid_err: 0.2455\n", | |
"2 epoch, train_err: 0.2472, valid_err: 0.2403\n", | |
"3 epoch, train_err: 0.2395, valid_err: 0.2306\n", | |
"4 epoch, train_err: 0.2253, valid_err: 0.2131\n", | |
"5 epoch, train_err: 0.2017, valid_err: 0.1865\n", | |
"6 epoch, train_err: 0.1699, valid_err: 0.1549\n", | |
"7 epoch, train_err: 0.1366, valid_err: 0.1260\n", | |
"8 epoch, train_err: 0.1091, valid_err: 0.1041\n", | |
"9 epoch, train_err: 0.0894, valid_err: 0.0890\n", | |
"10 epoch, train_err: 0.0760, valid_err: 0.0787\n", | |
"11 epoch, train_err: 0.0667, valid_err: 0.0715\n", | |
"12 epoch, train_err: 0.0601, valid_err: 0.0662\n", | |
"13 epoch, train_err: 0.0553, valid_err: 0.0623\n", | |
"14 epoch, train_err: 0.0516, valid_err: 0.0592\n", | |
"15 epoch, train_err: 0.0488, valid_err: 0.0568\n", | |
"16 epoch, train_err: 0.0465, valid_err: 0.0548\n", | |
"17 epoch, train_err: 0.0446, valid_err: 0.0531\n", | |
"18 epoch, train_err: 0.0430, valid_err: 0.0517\n", | |
"19 epoch, train_err: 0.0417, valid_err: 0.0505\n", | |
"\n", | |
"--- TESZTELÉS ---\n", | |
"\n", | |
"0 [-0.49551261 -0.7444607 ] 0.11 (elvart eredmeny: 0.00)\n", | |
"1 [ 0.89288499 -1.26981044] 0.94 (elvart eredmeny: 1.00)\n", | |
"2 [-0.17687074 1.45002022] 0.72 (elvart eredmeny: 1.00)\n", | |
"3 [0.23518118 1.54853674] 0.36 (elvart eredmeny: 0.00)\n", | |
"4 [-0.91921004 -1.15158256] 0.04 (elvart eredmeny: 0.00)\n", | |
"5 [ 0.45007887 -0.45520451] 0.63 (elvart eredmeny: 1.00)\n", | |
"6 [-0.7232066 0.52937545] 0.86 (elvart eredmeny: 1.00)\n", | |
"7 [0.52408431 0.74905089] 0.11 (elvart eredmeny: 0.00)\n", | |
"8 [-1.13124799 -0.64346812] 0.09 (elvart eredmeny: 0.00)\n", | |
"9 [ 1.05619852 -1.16867333] 0.95 (elvart eredmeny: 1.00)\n", | |
"10 [-1.64331284 0.32178486] 0.84 (elvart eredmeny: 1.00)\n", | |
"11 [1.27592446 0.47490425] 0.10 (elvart eredmeny: 0.00)\n", | |
"12 [-0.85234509 -1.67766207] 0.06 (elvart eredmeny: 0.00)\n", | |
"13 [ 1.06439281 -0.01519469] 0.45 (elvart eredmeny: 1.00)\n", | |
"14 [-1.49598073 1.1721386 ] 0.98 (elvart eredmeny: 1.00)\n", | |
"15 [1.00867052 1.33834844] 0.04 (elvart eredmeny: 0.00)\n", | |
"16 [-0.96073952 -0.75856217] 0.06 (elvart eredmeny: 0.00)\n", | |
"17 [ 0.96820285 -0.62312564] 0.91 (elvart eredmeny: 1.00)\n", | |
"18 [-1.27731201 1.01086655] 0.97 (elvart eredmeny: 1.00)\n", | |
"19 [1.24921224 1.45125364] 0.03 (elvart eredmeny: 0.00)\n", | |
"20 [-0.68034828 -1.39272014] 0.07 (elvart eredmeny: 0.00)\n", | |
"21 [ 0.19203454 -0.69725794] 0.54 (elvart eredmeny: 1.00)\n", | |
"22 [-1.35447081 1.3051462 ] 0.98 (elvart eredmeny: 1.00)\n", | |
"23 [0.75811891 1.38066171] 0.07 (elvart eredmeny: 0.00)\n", | |
"24 [ 0.04865448 -0.90985725] 0.47 (elvart eredmeny: 0.00)\n", | |
"25 [ 0.83494361 -0.42420374] 0.81 (elvart eredmeny: 1.00)\n", | |
"26 [-0.42625033 1.69214366] 0.85 (elvart eredmeny: 1.00)\n", | |
"27 [0.9747302 0.63829576] 0.05 (elvart eredmeny: 0.00)\n", | |
"28 [-0.89859695 -1.16324684] 0.04 (elvart eredmeny: 0.00)\n", | |
"29 [ 0.99406789 -1.27367092] 0.95 (elvart eredmeny: 1.00)\n", | |
"30 [-0.4986541 1.23885647] 0.90 (elvart eredmeny: 1.00)\n", | |
"31 [0.94899899 0.93553572] 0.04 (elvart eredmeny: 0.00)\n", | |
"32 [-0.98989922 -0.28933404] 0.25 (elvart eredmeny: 0.00)\n", | |
"33 [ 1.4178541 -0.81594234] 0.93 (elvart eredmeny: 1.00)\n", | |
"34 [-1.28515493 0.39272723] 0.87 (elvart eredmeny: 1.00)\n", | |
"35 [1.01544978 0.6359506 ] 0.05 (elvart eredmeny: 0.00)\n", | |
"36 [-0.68920096 -1.05628831] 0.06 (elvart eredmeny: 0.00)\n", | |
"37 [ 1.23285696 -0.78808995] 0.94 (elvart eredmeny: 1.00)\n", | |
"38 [-0.45570044 -0.08445807] 0.36 (elvart eredmeny: 1.00)\n", | |
"39 [0.78528024 1.92471042] 0.17 (elvart eredmeny: 0.00)\n", | |
"40 [-2.02150252 -0.50132607] 0.34 (elvart eredmeny: 0.00)\n", | |
"41 [ 1.07375928 -0.30132133] 0.78 (elvart eredmeny: 1.00)\n", | |
"42 [-0.97967981 1.46134931] 0.97 (elvart eredmeny: 1.00)\n", | |
"43 [1.43104178 1.01282183] 0.03 (elvart eredmeny: 0.00)\n", | |
"44 [-1.42261241 -0.46956705] 0.19 (elvart eredmeny: 0.00)\n", | |
"45 [ 0.45148347 -0.9437387 ] 0.85 (elvart eredmeny: 1.00)\n", | |
"46 [-0.47857327 1.4169799 ] 0.89 (elvart eredmeny: 1.00)\n", | |
"47 [0.73120781 1.20702752] 0.06 (elvart eredmeny: 0.00)\n", | |
"48 [-1.59189506 -1.0949691 ] 0.04 (elvart eredmeny: 0.00)\n", | |
"49 [ 1.54374629 -0.27527979] 0.76 (elvart eredmeny: 1.00)\n", | |
"50 [-0.6027629 0.85996166] 0.91 (elvart eredmeny: 1.00)\n", | |
"51 [1.26573007 0.91350389] 0.03 (elvart eredmeny: 0.00)\n", | |
"52 [-0.82681195 -0.8418245 ] 0.06 (elvart eredmeny: 0.00)\n", | |
"53 [ 0.53999516 -0.55402011] 0.76 (elvart eredmeny: 1.00)\n", | |
"54 [-1.38054512 1.27422799] 0.98 (elvart eredmeny: 1.00)\n", | |
"55 [0.45722329 0.59802441] 0.15 (elvart eredmeny: 0.00)\n", | |
"56 [-2.17252156 -0.95218351] 0.16 (elvart eredmeny: 0.00)\n", | |
"57 [ 0.97142945 -0.50835919] 0.88 (elvart eredmeny: 1.00)\n", | |
"58 [-0.31580073 0.25126539] 0.54 (elvart eredmeny: 1.00)\n", | |
"59 [1.32725243 0.7145116 ] 0.05 (elvart eredmeny: 0.00)\n", | |
"60 [-0.93427312 -0.92924414] 0.05 (elvart eredmeny: 0.00)\n", | |
"61 [ 0.68579039 -1.28345398] 0.92 (elvart eredmeny: 1.00)\n", | |
"62 [-0.7771762 1.29016349] 0.96 (elvart eredmeny: 1.00)\n", | |
"63 [0.32342975 0.91520552] 0.19 (elvart eredmeny: 0.00)\n", | |
"64 [-1.18335154 -0.38699305] 0.20 (elvart eredmeny: 0.00)\n", | |
"65 [ 1.10755805 -0.20065101] 0.69 (elvart eredmeny: 1.00)\n", | |
"66 [-1.5946805 1.39085526] 0.98 (elvart eredmeny: 1.00)\n", | |
"67 [0.95956766 0.59662601] 0.06 (elvart eredmeny: 0.00)\n", | |
"68 [-0.68326656 -0.17477146] 0.31 (elvart eredmeny: 0.00)\n", | |
"69 [ 1.60443149 -0.51055056] 0.85 (elvart eredmeny: 1.00)\n", | |
"70 [-1.345509 1.06387085] 0.97 (elvart eredmeny: 1.00)\n", | |
"71 [1.0399581 1.48467107] 0.04 (elvart eredmeny: 0.00)\n", | |
"72 [-0.97578159 -0.8746935 ] 0.05 (elvart eredmeny: 0.00)\n", | |
"73 [ 0.59456082 -0.58012958] 0.80 (elvart eredmeny: 1.00)\n", | |
"74 [-0.99838184 1.00794573] 0.97 (elvart eredmeny: 1.00)\n", | |
"75 [0.67683025 0.94531504] 0.07 (elvart eredmeny: 0.00)\n", | |
"76 [-1.38522558 -0.80909313] 0.07 (elvart eredmeny: 0.00)\n", | |
"77 [ 0.61733912 -0.56602641] 0.80 (elvart eredmeny: 1.00)\n", | |
"78 [-0.07583425 1.30576314] 0.63 (elvart eredmeny: 1.00)\n", | |
"79 [0.38597268 0.76179308] 0.16 (elvart eredmeny: 0.00)\n", | |
"80 [-1.62192946 -0.87559713] 0.07 (elvart eredmeny: 0.00)\n", | |
"81 [ 0.0933582 -1.42405863] 0.67 (elvart eredmeny: 1.00)\n", | |
"82 [-0.41178207 1.31268606] 0.87 (elvart eredmeny: 1.00)\n", | |
"83 [0.20688708 0.72200264] 0.28 (elvart eredmeny: 0.00)\n", | |
"84 [-0.6947673 -0.70454052] 0.09 (elvart eredmeny: 0.00)\n", | |
"85 [ 0.15912428 -0.76434734] 0.53 (elvart eredmeny: 1.00)\n", | |
"86 [-1.17388859 1.46510224] 0.98 (elvart eredmeny: 1.00)\n", | |
"87 [0.79729717 1.08618224] 0.05 (elvart eredmeny: 0.00)\n", | |
"88 [-0.95084721 -0.58087122] 0.10 (elvart eredmeny: 0.00)\n", | |
"89 [ 0.54332732 -0.58005466] 0.77 (elvart eredmeny: 1.00)\n", | |
"90 [-0.36137803 1.75261358] 0.82 (elvart eredmeny: 1.00)\n", | |
"91 [1.59511841 0.68196924] 0.09 (elvart eredmeny: 0.00)\n", | |
"92 [-0.64393552 -0.51719602] 0.14 (elvart eredmeny: 0.00)\n", | |
"93 [ 1.15069897 -0.54158161] 0.90 (elvart eredmeny: 1.00)\n", | |
"94 [-0.71245188 0.67784608] 0.90 (elvart eredmeny: 1.00)\n", | |
"95 [0.97944428 1.30625314] 0.04 (elvart eredmeny: 0.00)\n", | |
"96 [-0.6956089 -1.37664792] 0.06 (elvart eredmeny: 0.00)\n", | |
"97 [ 0.49433522 -1.06646624] 0.88 (elvart eredmeny: 1.00)\n", | |
"98 [-0.99364269 0.99498732] 0.96 (elvart eredmeny: 1.00)\n", | |
"99 [0.44984778 1.28285523] 0.15 (elvart eredmeny: 0.00)\n" | |
],
"name": "stdout"
}
]
}
]
}