import torch
import torch.nn as nn

class Config:
    """Hyperparameters for the JarvisX50M language model."""
    vocab_size = 50257        # GPT-2 BPE vocabulary size
    embedding_dim = 512       # token / positional embedding width
    num_layers = 10           # number of transformer blocks
    num_heads = 8             # attention heads per block
    ff_dim = 2048             # hidden width of the feed-forward sublayer
    max_seq_len = 256         # maximum context length
    device = "cuda" if torch.cuda.is_available() else "cpu"

class JarvisXCore(nn.Module):
    """A single post-norm transformer block: self-attention and a feed-forward
    sublayer, each wrapped in a residual connection followed by LayerNorm."""

    def __init__(self, embed_dim, heads, ff_dim):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, heads, batch_first=True)
        self.ln1 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.GELU(),
            nn.Linear(ff_dim, embed_dim)
        )
        self.ln2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        # Causal mask: each position attends only to itself and earlier tokens,
        # as required for autoregressive language modeling.
        seq_len = x.size(1)
        mask = torch.triu(torch.full((seq_len, seq_len), float("-inf"), device=x.device), diagonal=1)
        attn_output, _ = self.attn(x, x, x, attn_mask=mask)
        x = self.ln1(x + attn_output)
        ff_output = self.ff(x)
        return self.ln2(x + ff_output)

class JarvisX50M(nn.Module):
    """Decoder-only transformer language model with learned positional embeddings."""

    def __init__(self, config):
        super().__init__()
        self.token_embed = nn.Embedding(config.vocab_size, config.embedding_dim)
        # Learned positional embeddings, one vector per position up to max_seq_len.
        self.pos_embed = nn.Parameter(torch.zeros(1, config.max_seq_len, config.embedding_dim))
        self.blocks = nn.Sequential(*[
            JarvisXCore(config.embedding_dim, config.num_heads, config.ff_dim)
            for _ in range(config.num_layers)
        ])
        self.ln_f = nn.LayerNorm(config.embedding_dim)
        self.head = nn.Linear(config.embedding_dim, config.vocab_size)

    def forward(self, x):
        # x: (batch, seq_len) token IDs -> logits of shape (batch, seq_len, vocab_size)
        x = self.token_embed(x) + self.pos_embed[:, :x.size(1), :]
        x = self.blocks(x)
        return self.head(self.ln_f(x))
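

# Minimal smoke-test sketch (an assumption, not part of the original file):
# instantiate the model, run a dummy batch through it, and report the
# parameter count. The token IDs below are random and purely illustrative.
if __name__ == "__main__":
    config = Config()
    model = JarvisX50M(config).to(config.device)

    # Random token IDs in the vocabulary, shape (batch, seq_len).
    dummy_tokens = torch.randint(0, config.vocab_size, (2, config.max_seq_len), device=config.device)
    logits = model(dummy_tokens)

    print("logits shape:", logits.shape)  # expected: (2, max_seq_len, vocab_size)
    print("parameters:", sum(p.numel() for p in model.parameters()))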