import tensorflow as tf

def normalize(image, label):
    # Scale pixel values from [0, 255] to [0, 1].
    return tf.cast(image, dtype=tf.float32) / 255.0, label

train = ds['train'].map(normalize).batch(32)
test = ds['test'].map(normalize).batch(32)

# Reuse the trained classifier up to its 'flatten' layer as a feature encoder.
encoding_model = tf.keras.Model(inputs=model.input, outputs=model.get_layer('flatten').output)
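For context, ds and model are assumed to come from earlier steps not shown in this snippet; a minimal sketch of what they could look like (the dataset name and the architecture below are placeholders, not the author's actual choices) is:

import tensorflow_datasets as tfds

ds = tfds.load('mnist', as_supervised=True)  # placeholder dataset name
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(28, 28, 1)),
    tf.keras.layers.Flatten(name='flatten'),  # layer referenced by get_layer('flatten')
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10),
])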
import tensorflow_data_validation as tfdv
from tensorflow_metadata.proto.v0 import anomalies_pb2

# Load previously computed dataset statistics from GCS and visualize them.
stats = tfdv.load_stats_binary(GCS_PATH_FOR_STATS_PB)
tfdv.visualize_statistics(stats)
# Output shown in the screenshot below.

# Load the schema and display it as a table.
schema = tfdv.load_schema_text(GCS_PATH_FOR_SCHEMA_PBTXT)
tfdv.display_schema(schema)
# Output shown in the screenshot below.
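Since anomalies_pb2 is imported above, a natural follow-up is to validate the loaded statistics against the schema and display whatever anomalies TFDV finds:

anomalies = tfdv.validate_statistics(statistics=stats, schema=schema)
tfdv.display_anomalies(anomalies)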
import dgl
import tensorflow as tf

dataset = dgl.data.CoraGraphDataset()
# A DGL dataset may contain multiple graphs.
# In the case of Cora, there is only one graph.
g = dataset[0]
# g.ndata is a dictionary of node-related data.
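The keys of that dictionary are the ones the later snippets rely on; a quick inspection (key names as used below, feature size per the Cora dataset):

features = g.ndata['feat']          # node features, shape (num_nodes, 1433)
labels = g.ndata['label']           # class label per node
train_mask = g.ndata['train_mask']  # boolean mask selecting training nodes
print(features.shape, labels.shape)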
# A plain feed-forward network over the 1433-dimensional Cora node features,
# predicting one of the 7 classes.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=(1433,)))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(7))
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
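This graph-free model can be fit directly on the node features selected by the training mask; a minimal sketch (epoch count and batch size are arbitrary choices, not taken from the source):

x_train = tf.boolean_mask(g.ndata['feat'], g.ndata['train_mask'])
y_train = tf.boolean_mask(g.ndata['label'], g.ndata['train_mask'])
model.fit(x_train, y_train, epochs=20, batch_size=32)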
from dgl.nn.tensorflow import GraphConv

class GCN(tf.keras.Model):
    def __init__(self, g, feat_dim, hidden_dim, class_num):
        super(GCN, self).__init__()
        self.g = g
        self.h1 = GraphConv(feat_dim, hidden_dim, activation=tf.nn.relu, allow_zero_in_degree=True)
        self.dropout = tf.keras.layers.Dropout(0.5)
        self.h2 = GraphConv(hidden_dim, class_num, allow_zero_in_degree=True)
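    # NOTE: the snippet is truncated here. A plausible forward pass, consistent
    # with the training loop below (which calls g_model(features)), would be:
    def call(self, features):
        h = self.h1(self.g, features)
        h = self.dropout(h)  # Keras propagates the training flag to Dropout.
        return self.h2(self.g, h)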
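Before the training loop below can run, g_model, loss_fcn, and opt have to exist; a plausible setup (hidden size and learning rate are assumptions, the feature and class counts come from Cora) is:

g_model = GCN(g, feat_dim=1433, hidden_dim=32, class_num=7)
loss_fcn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
opt = tf.keras.optimizers.Adam(learning_rate=0.01)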
for epoch in range(20):
    with tf.GradientTape() as tape:
        # Forward pass over the full graph; training=True enables dropout.
        logits = g_model(g.ndata['feat'], training=True)
        # Loss is only calculated on the training set.
        train_mask = g.ndata['train_mask']
        loss = loss_fcn(tf.boolean_mask(g.ndata['label'], train_mask),
                        tf.boolean_mask(logits, train_mask))
    grads = tape.gradient(loss, g_model.trainable_weights)
    opt.apply_gradients(zip(grads, g_model.trainable_weights))
import dgl
import tensorflow as tf

def eval(model, g, node_ids_to_eval):
    # Run the model in inference mode and compute accuracy on the given nodes.
    logits = model(g, g.ndata['feat'], training=False)
    logits = tf.gather(logits, node_ids_to_eval)
    labels = tf.gather(g.ndata['label'], node_ids_to_eval)
    indices = tf.math.argmax(logits, axis=1)
    acc = tf.reduce_mean(tf.cast(indices == labels, dtype=tf.float32))
    return acc.numpy().item()
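A quick usage sketch, assuming the standard Cora test_mask in g.ndata (tf.where turns the boolean mask into node indices):

test_ids = tf.where(g.ndata['test_mask'])[:, 0]
print('Test accuracy:', eval(g_model, g, test_ids))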
from dgl.nn.tensorflow import GraphConv

class GCN(tf.keras.Model):
    def __init__(self, feat_dim, hidden_dim, class_num):
        super(GCN, self).__init__()
        self.h1 = GraphConv(feat_dim, hidden_dim, activation=tf.nn.relu, allow_zero_in_degree=True)
        self.dropout = tf.keras.layers.Dropout(0.5)
        self.h2 = GraphConv(hidden_dim, class_num, allow_zero_in_degree=True)

    def call(self, g, features):
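        # NOTE: the snippet is cut off here; the body below is a plausible
        # completion based on the layers defined in __init__.
        h = self.h1(g, features)
        h = self.dropout(h)  # Keras propagates the training flag to Dropout.
        return self.h2(g, h)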
from dgl.nn.tensorflow import SAGEConv

class SAGE(tf.keras.Model):
    def __init__(self, feat_dim, hidden_dim, class_num):
        super(SAGE, self).__init__()
        self.h1 = SAGEConv(in_feats=feat_dim, out_feats=hidden_dim, aggregator_type='pool', feat_drop=0.5, activation=tf.nn.relu)
        # The second layer consumes the hidden representation, so its
        # in_feats must be hidden_dim (not feat_dim).
        self.h2 = SAGEConv(in_feats=hidden_dim, out_feats=class_num, aggregator_type='pool')

    def call(self, g, features):
        h = features
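        # NOTE: the snippet is cut off here; a plausible completion mirroring
        # the GCN forward pass above:
        h = self.h1(g, h)
        h = self.h2(g, h)
        return h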
from dgl.nn.tensorflow import GINConv

class GIN(tf.keras.Model):
    @staticmethod
    def mlp(feat_dim, hidden_dim, out_dim):
        # Small MLP used as the GINConv apply function.
        m = tf.keras.models.Sequential()
        m.add(tf.keras.layers.Input(shape=(feat_dim,)))
        m.add(tf.keras.layers.Dense(hidden_dim, activation='relu'))
        m.add(tf.keras.layers.Dropout(0.5))
        m.add(tf.keras.layers.Dense(out_dim))
        return m
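    # NOTE: the snippet ends here; a plausible remainder of the class,
    # mirroring the GCN and SAGE examples above, could look like this.
    def __init__(self, feat_dim, hidden_dim, class_num):
        super(GIN, self).__init__()
        self.h1 = GINConv(GIN.mlp(feat_dim, hidden_dim, hidden_dim), 'sum')
        self.h2 = GINConv(GIN.mlp(hidden_dim, hidden_dim, class_num), 'sum')

    def call(self, g, features):
        h = self.h1(g, features)
        h = self.h2(g, h)
        return h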