# JarvisX50M / model.py
import torch
import torch.nn as nn


class Config:
    """Hyperparameters for the JarvisX50M architecture."""
    vocab_size = 50257   # GPT-2 BPE vocabulary size
    embedding_dim = 512  # width of the token embeddings and residual stream
    num_layers = 10      # number of transformer blocks
    num_heads = 8        # attention heads per block
    ff_dim = 2048        # hidden width of the feed-forward sublayer
    max_seq_len = 256    # maximum context length (size of the positional table)
    device = "cuda" if torch.cuda.is_available() else "cpu"

class JarvisXCore(nn.Module):
    """A single post-norm transformer block: multi-head self-attention and a
    position-wise feed-forward network, each with a residual connection."""

    def __init__(self, embed_dim, heads, ff_dim):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, heads, batch_first=True)
        self.ln1 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.GELU(),
            nn.Linear(ff_dim, embed_dim),
        )
        self.ln2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # Self-attention with queries, keys, and values all drawn from x.
        # No attn_mask is passed, so attention is bidirectional as written.
        attn_output, _ = self.attn(x, x, x)
        x = self.ln1(x + attn_output)   # residual + post-norm
        ff_output = self.ff(x)
        return self.ln2(x + ff_output)  # residual + post-norm
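
# The block above attends bidirectionally. For autoregressive decoding one
# would normally supply a causal mask; the helper below is a minimal sketch
# (hypothetical, not wired into the model as uploaded) using PyTorch's
# additive-mask convention, e.g. self.attn(x, x, x, attn_mask=causal_mask(...)).
def causal_mask(seq_len: int, device=None) -> torch.Tensor:
    """(-inf) above the diagonal so position i cannot attend to j > i."""
    full = torch.full((seq_len, seq_len), float("-inf"), device=device)
    return torch.triu(full, diagonal=1)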

class JarvisX50M(nn.Module):
    """GPT-style stack: token + learned positional embeddings, num_layers
    JarvisXCore blocks, a final LayerNorm, and a linear head over the vocab."""

    def __init__(self, config):
        super().__init__()
        self.token_embed = nn.Embedding(config.vocab_size, config.embedding_dim)
        # Learned absolute positional embeddings, one row per position.
        self.pos_embed = nn.Parameter(torch.zeros(1, config.max_seq_len, config.embedding_dim))
        self.blocks = nn.Sequential(*[
            JarvisXCore(config.embedding_dim, config.num_heads, config.ff_dim)
            for _ in range(config.num_layers)
        ])
        self.ln_f = nn.LayerNorm(config.embedding_dim)
        self.head = nn.Linear(config.embedding_dim, config.vocab_size)

    def forward(self, x):
        # x: (batch, seq_len) token ids; seq_len must not exceed max_seq_len,
        # since the positional table is sliced to the input length.
        x = self.token_embed(x) + self.pos_embed[:, :x.size(1), :]
        x = self.blocks(x)
        return self.head(self.ln_f(x))  # (batch, seq_len, vocab_size) logits
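
# Minimal smoke test: a sketch, not part of the upload. The batch size, the
# sequence length, and the random token ids below are arbitrary placeholders.
if __name__ == "__main__":
    config = Config()
    model = JarvisX50M(config).to(config.device)
    tokens = torch.randint(0, config.vocab_size, (2, 64), device=config.device)
    logits = model(tokens)
    print(logits.shape)  # expected: torch.Size([2, 64, 50257])
    print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")
    print(f"estimated:  {estimated_params():,}")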