Created February 8, 2024 23:54
Over the past few weeks, I have been working on a novel neural network layer, tentatively named idNorm. The core idea of this layer is to perform data compression and expansion, simulating a form of 'learned intelligence' through the manipulation of compacted data layers. My hypothesis is that this mimics a fundament…
import torch
import torch.nn as nn


class IdNorm(nn.Module):
    def __init__(self, output_dim, depth):
        super(IdNorm, self).__init__()
        self.depth = depth
        self.output_dim = output_dim
        self.embedding = nn.Embedding(output_dim, depth)
        # MOD_ADLER is derived from output_dim here; it could also be exposed
        # as a separate constructor argument.
        self.MOD_ADLER = (self.output_dim - 1) // 2
    def forward(self, x):
        # Apply tanh activation, scale, and round to integer-valued floats
        x = x.tanh().mul(100).round()

        # One checksum per (batch, sequence) position
        res = torch.zeros(x.size(0), x.size(1), dtype=torch.float32)

        # Adler-like checksum over the feature dimension
        for i in range(x.size(0)):
            for j in range(x.size(1)):
                data = x[i, j]
                a = 1
                b = 0
                for index in range(data.size(0)):
                    a = (a + data[index].item()) % self.MOD_ADLER
                    b = (b + a) % self.MOD_ADLER
                res[i, j] = b

        # Shift the checksum into [MOD_ADLER, 2 * MOD_ADLER), which stays below
        # output_dim, then cast to long so it can index the embedding table.
        res = res.add(self.MOD_ADLER).round().long()
        result = self.embedding(res)
        return result
# Example usage:
# Define the layer with a specific output_dim and depth
id_norm_layer = IdNorm(output_dim=32, depth=16)

# Example input of shape (batch_size, seq_length, feature_size)
input_tensor = torch.randn(10, 20, 30)
output = id_norm_layer(input_tensor)  # -> (10, 20, 16)
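The nested Python loops above compute one checksum per position element by element, which can become a bottleneck for large batches. Because the Adler-style update only accumulates sums, the same index can be obtained with tensor operations: the running value of a after k elements is 1 plus the k-th prefix sum, b is the sum of those running values, and taking the modulus once at the end yields the same residue as reducing at every step (the inputs are integer-valued after round()). The sketch below is a hedged, vectorized reimplementation under that assumption; the class name IdNormVectorized is mine, not part of the gist.

import torch
import torch.nn as nn


class IdNormVectorized(nn.Module):
    # Sketch of a vectorized equivalent of IdNorm (hypothetical helper, not
    # from the original gist).
    def __init__(self, output_dim, depth):
        super().__init__()
        self.output_dim = output_dim
        self.embedding = nn.Embedding(output_dim, depth)
        self.MOD_ADLER = (output_dim - 1) // 2

    def forward(self, x):
        x = x.tanh().mul(100).round()
        n = x.size(-1)
        # a_k = 1 + prefix_sum_k and b = sum_k a_k = n + sum of prefix sums;
        # a single modulo at the end gives the same residue as per-step mod.
        prefix = x.cumsum(dim=-1)
        b = torch.remainder(n + prefix.sum(dim=-1), self.MOD_ADLER)
        # Shift into [MOD_ADLER, 2 * MOD_ADLER), which stays below output_dim.
        idx = (b + self.MOD_ADLER).long()
        return self.embedding(idx)


vec_layer = IdNormVectorized(output_dim=32, depth=16)
out = vec_layer(torch.randn(10, 20, 30))  # -> (10, 20, 16)

On integer-valued inputs this should produce the same indices as the loop version, while staying on whatever device the input tensor lives on.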