Update app.py
app.py CHANGED
@@ -16,10 +16,9 @@ class ModelConfig:
     dropout = 0.1
 
 config = ModelConfig()
-device = "cpu"  # Spaces CPU
+device = "cpu"  # CPU is required for Spaces
 
-# ---------------- MODEL
-# (The model architecture must not be changed, so that the weights load correctly)
+# ---------------- MODEL ----------------
 class Head(nn.Module):
     def __init__(self, head_size):
         super().__init__()
@@ -27,6 +26,7 @@ class Head(nn.Module):
         self.query = nn.Linear(config.n_embd, head_size, bias=False)
         self.value = nn.Linear(config.n_embd, head_size, bias=False)
         self.register_buffer("tril", torch.tril(torch.ones(config.block_size, config.block_size)))
+        self.dropout = nn.Dropout(config.dropout)
 
     def forward(self, x):
         B, T, C = x.shape
@@ -34,66 +34,76 @@ class Head(nn.Module):
         wei = q @ k.transpose(-2, -1) * (C ** -0.5)
         wei = wei.masked_fill(self.tril[:T, :T] == 0, float("-inf"))
         wei = F.softmax(wei, dim=-1)
+        wei = self.dropout(wei)
         return wei @ v
 
+
 class MultiHeadAttention(nn.Module):
     def __init__(self, num_heads, head_size):
         super().__init__()
         self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
         self.proj = nn.Linear(config.n_embd, config.n_embd)
+        self.dropout = nn.Dropout(config.dropout)
 
     def forward(self, x):
-        return self.proj(torch.cat([h(x) for h in self.heads], dim=-1))
+        out = torch.cat([h(x) for h in self.heads], dim=-1)
+        return self.dropout(self.proj(out))
+
 
 class FeedForward(nn.Module):
-    def __init__(self):
+    def __init__(self, n_embd):
         super().__init__()
         self.net = nn.Sequential(
-            nn.Linear(config.n_embd, 4 * config.n_embd),
+            nn.Linear(n_embd, 4 * n_embd),
             nn.ReLU(),
             nn.Dropout(config.dropout),
-            nn.Linear(4 * config.n_embd, config.n_embd),
+            nn.Linear(4 * n_embd, n_embd),
             nn.Dropout(config.dropout),
         )
 
     def forward(self, x):
         return self.net(x)
 
+
 class Block(nn.Module):
-    def __init__(self):
+    def __init__(self, n_embd, n_head):
         super().__init__()
-        self.sa = MultiHeadAttention(config.n_head, config.n_embd // config.n_head)
-        self.ffwd = FeedForward()
-        self.ln1 = nn.LayerNorm(config.n_embd)
-        self.ln2 = nn.LayerNorm(config.n_embd)
+        head_size = n_embd // n_head
+        self.sa = MultiHeadAttention(n_head, head_size)
+        self.ffwd = FeedForward(n_embd)
+        self.ln1 = nn.LayerNorm(n_embd)
+        self.ln2 = nn.LayerNorm(n_embd)
 
     def forward(self, x):
         x = x + self.sa(self.ln1(x))
         x = x + self.ffwd(self.ln2(x))
         return x
 
+
 class MyLanguageModel(nn.Module):
     def __init__(self):
         super().__init__()
         self.token_embedding_table = nn.Embedding(config.vocab_size, config.n_embd)
         self.position_embedding_table = nn.Embedding(config.block_size, config.n_embd)
-        self.blocks = nn.Sequential(*[Block() for _ in range(config.n_layer)])
+        self.blocks = nn.Sequential(*[Block(config.n_embd, n_head=config.n_head) for _ in range(config.n_layer)])
         self.ln_f = nn.LayerNorm(config.n_embd)
         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+        self.dropout = nn.Dropout(config.dropout)
 
     def forward(self, idx):
         B, T = idx.shape
         tok_emb = self.token_embedding_table(idx)
         pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device))
-        x = tok_emb + pos_emb
+        x = self.dropout(tok_emb + pos_emb)
         x = self.blocks(x)
         x = self.ln_f(x)
         return self.lm_head(x)
 
+
 # ---------------- LOAD MODEL ----------------
-print("Model yükleniyor...")
 REPO_ID = "jetbabareal/Sabir-20M"
 
+print("Model yükleniyor... / Loading model...")
 model = MyLanguageModel().to(device)
 weights = hf_hub_download(REPO_ID, "model.safetensors")
 model.load_state_dict(load_file(weights))
@@ -101,88 +111,133 @@ model.eval()
 
 tokenizer_path = hf_hub_download(REPO_ID, "tokenizer.model")
 tokenizer = spm.SentencePieceProcessor(model_file=tokenizer_path)
-print("Model …
-
-# ---------------- GENERATION
-def …
-    """
-    …
-    message: the user's new message
-    history: the previous conversation history (the model is small, so we ignore history for now)
-    """
-
-    # 1. Prompt preparation
-    # The model is designed to answer only the last message (because the context window is small)
-    prompt = f"Kullanıcı: {message}\nModel:"
-    idx = torch.tensor(tokenizer.encode(prompt), dtype=torch.long).unsqueeze(0)
-
-    # Settings
-    max_new_tokens = 100
-    temperature = 0.6
-    top_k = 30
+print("Model başarıyla yüklendi! / Model loaded successfully!")
+
+# ---------------- GENERATION ----------------
+def generate(prompt, max_new_tokens=100, temperature=0.2, top_k=30):
+    if not prompt.strip():
+        return "Lütfen bir şeyler yazın! / Please write something!"
 
-    …
+    full_prompt = f"Kullanıcı: {prompt}\nModel:"
+    input_ids = tokenizer.encode(full_prompt)
+    idx = torch.tensor(input_ids, dtype=torch.long, device=device).unsqueeze(0)
 
-    …
+    generated_ids = []
+
     for _ in range(max_new_tokens):
-        # If the context window overflows, keep only the last part
         idx_cond = idx[:, -config.block_size:]
-
         with torch.no_grad():
             logits = model(idx_cond)
 
         logits = logits[:, -1, :] / temperature
 
         if top_k:
-            v, _ = torch.topk(logits, top_k)
+            v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
             logits[logits < v[:, [-1]]] = -float("inf")
 
         probs = F.softmax(logits, dim=-1)
-        idx_next = torch.multinomial(probs, 1)
+        idx_next = torch.multinomial(probs, num_samples=1)
 
         token_id = idx_next.item()
+        generated_ids.append(token_id)
 
-        # …
-        …
+        # Stop conditions
+        decoded_so_far = tokenizer.decode(generated_ids)
+        if "Kullanıcı:" in decoded_so_far or "Model:" in decoded_so_far:
+            generated_ids = generated_ids[:-1]
             break
 
-        …
+        if token_id == tokenizer.eos_id():
+            break
+
         idx = torch.cat([idx, idx_next], dim=1)
-… (the remaining removed lines, the old response decoding and chat interface code, are truncated in the diff view)
+
+    response = tokenizer.decode(generated_ids)
+    return response.strip()
+
+
+# ---------------- GRADIO UI ----------------
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown(
+        """
+        # 🤖 Sabir-20M - Türkçe Mini LLM
+        20 milyon parametreli deneysel Türkçe dil modeli (eğitim ve araştırma amaçlı)
+
+        **Not:** Bu model eğitim amaçlı geliştirilmiştir ve küçük bir veri seti üzerinde eğitilmiştir.
+        Üretilen metinler her zaman tutarlı olmayabilir.
+        """
+    )
+
+    with gr.Row():
+        with gr.Column():
+            prompt_input = gr.Textbox(
+                label="Promptunuzu yazın / Write your prompt",
+                placeholder="Merhaba, nasılsın?",
+                lines=3
+            )
+
+            with gr.Row():
+                max_tokens = gr.Slider(
+                    minimum=20,
+                    maximum=200,
+                    value=100,
+                    step=10,
+                    label="Maksimum Token Sayısı / Max Tokens"
+                )
+                temperature = gr.Slider(
+                    minimum=0.1,
+                    maximum=1.5,
+                    value=0.2,
+                    step=0.1,
+                    label="Temperature (Yaratıcılık)"
+                )
+
+                top_k = gr.Slider(
+                    minimum=5,
+                    maximum=50,
+                    value=30,
+                    step=5,
+                    label="Top-K Sampling"
+                )
+
+            generate_btn = gr.Button("🚀 Üret / Generate", variant="primary")
+
+        with gr.Column():
+            output = gr.Textbox(
+                label="Model Çıktısı / Model Output",
+                lines=10
+            )
+
+            gr.Markdown(
+                """
+                ### 💡 İpuçları:
+                - **Temperature**: Düşük değerler (0.3-0.5) daha tutarlı, yüksek değerler (0.8-1.2) daha yaratıcı çıktılar üretir
+                - **Top-K**: Düşük değerler daha deterministik, yüksek değerler daha çeşitli sonuçlar verir
+                - **Max Tokens**: Daha uzun yanıtlar için artırın (ancak tutarsızlık riski artar)
+
+                ### 📊 Model Bilgileri:
+                - Parametre Sayısı: ~20M
+                - Mimari: Transformer (8 katman, 384 gizli boyut)
+                - Vocabulary: 8000 token (SentencePiece)
+                """
+            )
+
+    # Example prompts
+    gr.Examples(
+        examples=[
+            ["Merhaba, nasılsın?"],
+            ["Türkiye'nin başkenti neresidir?"],
+            ["Yapay zeka nedir?"],
+            ["En sevdiğin renk nedir?"],
+            ["Bugün hava nasıl?"],
+        ],
+        inputs=prompt_input,
+    )
+
+    generate_btn.click(
+        fn=generate,
+        inputs=[prompt_input, max_tokens, temperature, top_k],
+        outputs=output
+    )
+
+demo.launch()
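For reference, the top-k filter inside `generate()` can be checked in isolation. A minimal sketch with made-up logits; the two filtering lines are copied from the sampling loop above:

```python
import torch
import torch.nn.functional as F

# Same filtering as in generate(): keep the top_k largest logits,
# push the rest to -inf, then sample from the renormalized distribution.
logits = torch.tensor([[2.0, 0.5, -1.0, 3.0, 0.0]])  # made-up example, batch of 1
top_k = 2
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float("inf")  # v[:, [-1]] is the k-th largest logit
probs = F.softmax(logits, dim=-1)
print(probs)  # only indices 3 and 0 keep nonzero probability
idx_next = torch.multinomial(probs, num_samples=1)
```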
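The UI text advertises ~20M parameters (8 layers, 384-dimensional embeddings, 8000-token vocabulary). A back-of-the-envelope check of that figure from the architecture above, assuming those config values and ignoring biases, LayerNorms, and positional embeddings:

```python
# Values taken from the "Model Bilgileri" section of the UI text.
n_layer, n_embd, vocab_size = 8, 384, 8000

attn = 4 * n_embd * n_embd        # per block: key/query/value (bias-free) + proj
ffwd = 2 * (n_embd * 4 * n_embd)  # per block: the two FeedForward linear layers
emb = vocab_size * n_embd         # token embedding table
lm_head = vocab_size * n_embd     # final projection back to the vocabulary

total = n_layer * (attn + ffwd) + emb + lm_head
print(f"~{total / 1e6:.1f}M")  # ≈ 20.3M, consistent with the ~20M claim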
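Once the app is running (locally via `python app.py`, or on the Space), the click handler can also be exercised programmatically. A hypothetical smoke test with `gradio_client`; the URL and the `/generate` endpoint name are assumptions (Gradio derives the default `api_name` from the handler function's name), not something stated in this commit:

```python
# Hypothetical client-side check; the URL and api_name are assumptions.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")
result = client.predict(
    "Merhaba, nasılsın?",  # prompt_input
    100,                   # max_tokens
    0.2,                   # temperature
    30,                    # top_k
    api_name="/generate",
)
print(result)
```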