from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, SpatialDropout1D

# Build the LSTM classifier; vocab_size, embedding_dim and maxlen are defined
# in the preprocessing snippets.
def lstm(optimizer, epochs, batchsize):
    model = Sequential()
    model.add(Embedding(input_dim=vocab_size,
                        output_dim=embedding_dim,
                        input_length=maxlen))
    model.add(SpatialDropout1D(0.4))
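
The gist is truncated after the dropout layer. A minimal continuation sketch is shown below; the LSTM width, the three-class softmax head and the loss are assumptions rather than the original settings.

    # Continuation sketch (assumed layer sizes and loss, not from the original gist)
    model.add(LSTM(64, dropout=0.2, recurrent_dropout=0.2))   # assumed 64 units
    model.add(Dense(3, activation="softmax"))                 # assumed 3 sentiment classes
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["accuracy"])
    return model
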
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

maxlen = 100
embedding_dim = 100
x = df.text.values
y = df.sentiment.astype("category").cat.codes.values
# train, validation and test split
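
The split itself is not shown in the snippet. A sketch of the tokenization and split it leads into might look like the following; the vocabulary cap and the 80/20 ratio are assumptions.

from sklearn.model_selection import train_test_split

tokenizer = Tokenizer(num_words=5000)            # assumed vocabulary cap
tokenizer.fit_on_texts(x)
sequences = tokenizer.texts_to_sequences(x)
x_pad = pad_sequences(sequences, maxlen=maxlen, padding="post")
vocab_size = len(tokenizer.word_index) + 1       # consumed by the Embedding layer above

x_train, x_test, y_train, y_test = train_test_split(
    x_pad, y, test_size=0.2, random_state=42)    # assumed 80/20 split
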
import optuna
from xgboost import XGBClassifier
from optuna.trial import TrialState
from sklearn.metrics import accuracy_score

# Optuna's objective function
def objective(trial):
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-1, log=True)
    max_depth = trial.suggest_int("max_depth", 2, 10, step=2, log=False)
    n_estimators = trial.suggest_int("n_estimators", 100, 300, step=100, log=False)
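
A sketch of how the objective is typically finished and handed to a study follows; the x_train/x_test names are assumed to come from the CountVectorizer split shown further down, and the trial budget is an assumption.

    # Continuation sketch: train on the sampled hyperparameters, score on the test fold
    model = XGBClassifier(learning_rate=learning_rate,
                          max_depth=max_depth,
                          n_estimators=n_estimators)
    model.fit(x_train, y_train)
    preds = model.predict(x_test)
    return accuracy_score(y_test, preds)

study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=50)           # assumed number of trials
print(study.best_params, study.best_value)
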
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split

# Vectorization
cv = CountVectorizer(lowercase=False)
text_vector = cv.fit_transform(df.text.values)
x = text_vector
y = df.iloc[:, -1].values
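
The imported train_test_split is presumably applied right after this; a sketch with an assumed 80/20 split:

x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=42)
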
import pandas as pd
import numpy as np
import re
import string
import unicodedata
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
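
The cleaning steps themselves are not shown, so the function below is only an illustrative sketch built from these imports (accent stripping, lowercasing, URL removal, stopword filtering); the original notebook may differ.

nltk.download("stopwords", quiet=True)
stop_words = set(stopwords.words("english"))
word_tokenizer = RegexpTokenizer(r"\w+")

def clean_text(text):
    # Strip accents, lowercase, then drop URLs, punctuation and stopwords
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("utf-8")
    text = re.sub(r"http\S+", "", text.lower())
    tokens = word_tokenizer.tokenize(text)
    return " ".join(t for t in tokens
                    if t not in stop_words and t not in string.punctuation)

df["text"] = df["text"].apply(clean_text)
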
from spektral.data.loaders import SingleLoader

learning_rate = 1e-1
epochs = 100

# Train model
loader_tr = SingleLoader(dataset, sample_weights=weights_tr)
model.fit(loader_tr.load(),
          steps_per_epoch=loader_tr.steps_per_epoch,
          epochs=epochs)
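
Evaluation follows the same single-mode loading pattern; the sketch below assumes test sample weights (weights_te) built the same way as weights_tr.

loader_te = SingleLoader(dataset, sample_weights=weights_te)
eval_results = model.evaluate(loader_te.load(), steps=loader_te.steps_per_epoch)
print("Test loss / accuracy:", eval_results)
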
optimizer_name = "Adam"
lr = 1e-1
optimizer = getattr(torch.optim, optimizer_name)(model.parameters(), lr=lr)
epochs = 100

def train():
    model.train()
    optimizer.zero_grad()
    F.nll_loss(model(data, node_features)[train_mask],
               node_labels[train_mask]).backward()
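
The gist cuts off inside train(); assuming the full version closes it with optimizer.step(), the surrounding loop and a simple accuracy check might look like this (test_mask is a hypothetical held-out mask):

def evaluate():
    model.eval()
    with torch.no_grad():
        logits = model(data, node_features)
        preds = logits[test_mask].argmax(dim=1)
        return (preds == node_labels[test_mask]).float().mean().item()

for epoch in range(epochs):
    train()
    if (epoch + 1) % 10 == 0:
        print(f"epoch {epoch + 1:03d} | test accuracy {evaluate():.4f}")
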
import tensorflow as tf
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.models.gcn import GCN

seed = 42
tf.random.set_seed(seed=seed)

# We convert the binary masks to sample weights so that we can compute the
# average loss over the nodes (following the original implementation by
# Kipf & Welling).
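
A sketch of that conversion and the compile step, adapted from Spektral's citation-GCN example; the mask names and the use of BinaryCrossentropy (matching the import above) are assumptions about how the custom karate dataset is set up.

import numpy as np

def mask_to_weights(mask):
    # Each selected node contributes equally to the summed loss
    return mask.astype(np.float32) / np.count_nonzero(mask)

weights_tr, weights_te = (mask_to_weights(m) for m in (train_mask, test_mask))

model = GCN(n_labels=dataset.n_labels)
model.compile(optimizer=Adam(learning_rate),
              loss=BinaryCrossentropy(reduction="sum"),
              weighted_metrics=["acc"])
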
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn import GraphConv
import dgl.function as fn

class GCN(nn.Module):
    def __init__(self, in_feats, h_feats):
        super(GCN, self).__init__()
        self.conv1 = GraphConv(in_feats, h_feats)
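
The class is truncated after the first layer. A completion sketch follows: the karate-club graph has two communities, so the output layer below assumes two classes, and the log-softmax matches the nll_loss used in the training snippet.

        self.conv2 = GraphConv(h_feats, 2)   # assumed two output classes

    def forward(self, g, in_feat):
        h = F.relu(self.conv1(g, in_feat))
        h = self.conv2(g, h)
        return F.log_softmax(h, dim=1)
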
import torch
import networkx as nx
from spektral.data import Dataset
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.layers import GCNConv

# Spektral custom dataset class
class KarateDataset(Dataset):
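
Spektral datasets implement read() and return a list of Graph objects. The sketch below is one plausible body, not the original: it builds a single graph from networkx's karate club, uses an identity matrix as node features and one-hot encodes the "club" attribute as labels.

    def read(self):
        import numpy as np
        import scipy.sparse as sp
        from spektral.data import Graph

        g = nx.karate_club_graph()
        a = sp.csr_matrix(nx.to_numpy_array(g))              # adjacency matrix
        x = np.eye(g.number_of_nodes(), dtype=np.float32)    # featureless nodes -> identity features
        clubs = np.array([g.nodes[i]["club"] for i in g.nodes]).reshape(-1, 1)
        y = OneHotEncoder().fit_transform(clubs).toarray().astype(np.float32)
        return [Graph(x=x, a=a, y=y)]
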