Created
May 30, 2019 15:41
-
-
Save khuangaf/9183847a8b2d50a4554cc6166b7a080f to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import torch | |
from torch.nn import Sequential as Seq, Linear, ReLU | |
from torch_geometric.nn import MessagePassing | |
from torch_geometric.utils import remove_self_loops, add_self_loops | |
class SAGEConv(MessagePassing):
    """GraphSAGE-style convolution built on PyG's ``MessagePassing``.

    Each node aggregates the element-wise maximum of its neighbours'
    linearly transformed features, concatenates the aggregate with its own
    features, and projects the result back to ``in_channels`` dimensions.

    NOTE(review): despite the ``out_channels`` argument, the output of
    ``forward`` has ``in_channels`` features — ``update_lin`` maps
    ``in_channels + out_channels -> in_channels``. Confirm this asymmetry
    is the intended contract before stacking layers by ``out_channels``.
    """

    def __init__(self, in_channels, out_channels):
        """Create the layer.

        Args:
            in_channels: size of each input node feature vector.
            out_channels: size of the per-neighbour message vectors.
        """
        # "max" aggregation: element-wise maximum over incoming messages.
        super().__init__(aggr='max')
        # Per-neighbour transform applied in message().
        self.lin = torch.nn.Linear(in_channels, out_channels)
        self.act = torch.nn.ReLU()
        # Combines [aggregated neighbours | own features] back to in_channels.
        self.update_lin = torch.nn.Linear(in_channels + out_channels,
                                          in_channels, bias=False)
        self.update_act = torch.nn.ReLU()

    def forward(self, x, edge_index):
        """Run one round of message passing.

        Args:
            x: node feature matrix of shape [N, in_channels].
            edge_index: edge list of shape [2, E].

        Returns:
            Updated node features of shape [N, in_channels].
        """
        # Normalise the graph: drop any pre-existing self-loops, then add
        # exactly one self-loop per node so every node also receives its
        # own (transformed) features during aggregation.
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)

    def message(self, x_j):
        # x_j: source-node features per edge, shape [E, in_channels].
        # Transform each neighbour's features before max-aggregation.
        return self.act(self.lin(x_j))

    def update(self, aggr_out, x):
        # aggr_out: max-aggregated messages, shape [N, out_channels].
        # Concatenate with each node's own features and project back down
        # to in_channels (see class-level NOTE on output width).
        new_embedding = torch.cat([aggr_out, x], dim=1)
        return self.update_act(self.update_lin(new_embedding))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment