# model_utils.py
from typing import List, Optional
import re

import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

import qa_store
from loader import (
    load_curriculum,
    load_manual_qa,
    rebuild_combined_qa,
    load_glossary,
    sync_download_manual_qa,  # pull the teacher-edited Q&A file from cloud storage
    sync_download_cache,      # pull the pre-computed embedding cache from cloud storage
    sync_upload_cache,        # push a freshly built embedding cache back up
    CACHE_PATH,               # on-disk location of the synced embedding cache
)

# -----------------------------
# Base chat model
# -----------------------------
MODEL_NAME = "SeaLLMs/SeaLLMs-v3-1.5B-Chat"
EMBED_MODEL_NAME = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"

# Read and write the embedding cache via loader.CACHE_PATH so that local
# loads, admin rebuilds, and cloud syncs all target the same file.

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Use float16 on GPU to save memory, float32 on CPU
dtype = torch.float16 if torch.cuda.is_available() else torch.float32
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=dtype)
model.to(device)
model.eval()

embed_model = SentenceTransformer(EMBED_MODEL_NAME)
embed_model = embed_model.to(device)

# Number of textbook entries to include in the RAG context
MAX_CONTEXT_ENTRIES = 4


# -----------------------------
# Embedding builders
# -----------------------------

def admin_force_rebuild_cache() -> str:
    """
    Force re-computation of all embeddings, save them to disk, and upload
    the cache to cloud storage. Triggered by the Teacher Panel button.
    """
    status_msg = []
    
    # 1. Compute Textbook
    print("[ADMIN] Rebuilding Textbook Embeddings...")
    texts = []
    for e in qa_store.ENTRIES:
        chapter = e.get("chapter_title", "") or ""
        section = e.get("section_title", "") or ""
        text = e.get("text", "") or ""
        texts.append(f"{chapter}\n{section}\n{text}")
    
    if texts:
        qa_store.TEXT_EMBEDDINGS = embed_model.encode(texts, convert_to_tensor=True)
        status_msg.append(f"✅ Textbook ({len(texts)})")

    # 2. Compute Glossary
    print("[ADMIN] Rebuilding Glossary Embeddings...")
    gloss_texts = [f"{i.get('term', '')} :: {i.get('definition', '')}" for i in qa_store.GLOSSARY]
    
    if gloss_texts:
        qa_store.GLOSSARY_EMBEDDINGS = embed_model.encode(
            gloss_texts, convert_to_numpy=True, normalize_embeddings=True
        )
        status_msg.append(f"✅ Glossary ({len(gloss_texts)})")

    # 3. Save to Disk
    print("[ADMIN] Saving to disk...")
    torch.save({
        "textbook": qa_store.TEXT_EMBEDDINGS,
        "glossary": qa_store.GLOSSARY_EMBEDDINGS
    }, CACHE_PATH)

    # 4. Upload to Cloud
    upload_status = sync_upload_cache()
    return f"Rebuild Complete: {', '.join(status_msg)} | {upload_status}"

def _build_entry_embeddings() -> None:
    """
    Load pre-computed embeddings if available, otherwise build them.
    """
    if not getattr(qa_store, "ENTRIES", None):
        qa_store.TEXT_EMBEDDINGS = None
        return

    # 1. Try loading the synced cache from disk
    if os.path.exists(CACHE_PATH):
        try:
            print(f"[INFO] Loading cached embeddings from {CACHE_PATH}...")
            cache = torch.load(CACHE_PATH, map_location=device)
            if "textbook" in cache and cache["textbook"] is not None:
                # Validate size matches
                if len(cache["textbook"]) == len(qa_store.ENTRIES):
                    qa_store.TEXT_EMBEDDINGS = cache["textbook"].to(device)
                    print("[INFO] Textbook embeddings loaded successfully.")
                    return
                else:
                    print("[WARN] Cache size mismatch (Data changed?). Re-computing...")
        except Exception as e:
            print(f"[WARN] Failed to load cache: {e}")

    # 2. Fallback: compute embeddings from scratch (slow on first run)
    print("[INFO] Computing textbook embeddings from scratch...")
    texts: List[str] = []
    for e in qa_store.ENTRIES:
        chapter = e.get("chapter_title", "") or e.get("chapter", "") or ""
        section = e.get("section_title", "") or e.get("section", "") or ""
        text = e.get("text", "") or ""
        combined = f"{chapter}\n{section}\n{text}"
        texts.append(combined)

    qa_store.TEXT_EMBEDDINGS = embed_model.encode(
        texts,
        convert_to_tensor=True,
        show_progress_bar=False,
    )


def _build_glossary_embeddings() -> None:
    """Create embeddings for glossary terms + definitions."""
    if not getattr(qa_store, "GLOSSARY", None):
        qa_store.GLOSSARY_EMBEDDINGS = None
        print("[INFO] No glossary terms to embed.")
        return

    # Embed term + definition together
    texts = [
        f"{item.get('term', '')} :: {item.get('definition', '')}"
        for item in qa_store.GLOSSARY
    ]

    embeddings = embed_model.encode(
        texts,
        convert_to_numpy=True,
        normalize_embeddings=True,
    )
    qa_store.GLOSSARY_EMBEDDINGS = embeddings
    print(f"[INFO] Built glossary embeddings for {len(texts)} terms.")


# -----------------------------
# Load data once at import time
# -----------------------------
sync_download_manual_qa()
sync_download_cache()  # fetch the embedding cache before attempting to load it
load_curriculum()
load_manual_qa()
load_glossary()
rebuild_combined_qa()
_build_entry_embeddings()
_build_glossary_embeddings()

# -----------------------------
# System prompt (Natural Science)
# -----------------------------
# English gloss of the Lao prompt below: "You are a natural-science assistant
# for students in grades M.1-M.4. Answer only in Lao, in 2-3 short, easy
# sentences. Rely only on the reference information below; if the information
# is insufficient or unclear, say you are not sure."
SYSTEM_PROMPT = (
    "ທ່ານແມ່ນຜູ້ຊ່ວຍເຫຼືອດ້ານວິທະຍາສາດທໍາມະຊາດ "
    "ສໍາລັບນັກຮຽນຊັ້ນ ມ.1-ມ.4. "
    "ຕອບແຕ່ພາສາລາວ ໃຫ້ຕອບສັ້ນໆ 2–3 ປະໂຫຍກ ແລະເຂົ້າໃຈງ່າຍ. "
    "ໃຫ້ອີງຈາກຂໍ້ມູນອ້າງອີງຂ້າງລຸ່ມນີ້ເທົ່ານັ້ນ. "
    "ຖ້າຂໍ້ມູນບໍ່ພຽງພໍ ຫຼືບໍ່ຊັດເຈນ ໃຫ້ບອກວ່າບໍ່ແນ່ໃຈ."
)


# -----------------------------
# Helper: history formatting
# -----------------------------
def _format_history(history: Optional[List]) -> str:
    """
    Convert last few chat turns into a Lao conversation snippet
    to give the model context for follow-up questions.
    Gradio history format: [[user_msg, bot_msg], [user_msg, bot_msg], ...]
    """
    if not history:
        return ""

    # keep only the last 3 turns to avoid very long prompts
    recent = history[-3:]

    lines: List[str] = []
    for turn in recent:
        if not isinstance(turn, (list, tuple)) or len(turn) != 2:
            continue
        user_msg, bot_msg = turn
        lines.append(f"ນັກຮຽນ: {user_msg}")
        lines.append(f"ອາຈານ AI: {bot_msg}")

    if not lines:
        return ""

    joined = "\n".join(lines) + "\n\n"
    return joined
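
# Worked example (hypothetical turn):
#     _format_history([["ນ້ຳແມ່ນຫຍັງ?", "ນ້ຳແມ່ນທາດແຫຼວ."]])
# returns:
#     "ນັກຮຽນ: ນ້ຳແມ່ນຫຍັງ?\nອາຈານ AI: ນ້ຳແມ່ນທາດແຫຼວ.\n\n"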


# -----------------------------
# RAG: retrieve textbook context
# -----------------------------
def retrieve_context(question: str, max_entries: int = MAX_CONTEXT_ENTRIES) -> str:
    """
    Embedding-based retrieval over textbook entries.
    Falls back to concatenated raw knowledge if embeddings are missing.
    """
    if not getattr(qa_store, "ENTRIES", None):
        # Fallback: raw knowledge (if available) or empty string
        return getattr(qa_store, "RAW_KNOWLEDGE", "")

    if qa_store.TEXT_EMBEDDINGS is None:
        top_entries = qa_store.ENTRIES[:max_entries]
    else:
        # 1) Encode the question
        q_vec = embed_model.encode(
            question,
            convert_to_tensor=True,
            show_progress_bar=False,
        )

        # 2) Cosine similarity with all entry embeddings
        sims = cos_sim(q_vec, qa_store.TEXT_EMBEDDINGS)[0]  # shape [N]

        # 3) Take top-k
        top_indices = torch.topk(sims, k=min(max_entries, sims.shape[0])).indices
        top_entries = [qa_store.ENTRIES[i] for i in top_indices.tolist()]

    # Build context string for the prompt
    context_blocks: List[str] = []
    for e in top_entries:
        header = (
            f"[ຊັ້ນ {e.get('grade','')}, "
            f"ໜ່ວຍ {e.get('unit','')}, "
            f"ບົດ {e.get('chapter_title','')}, "
            f"ຫົວຂໍ້ {e.get('section_title','')}]"
        )
        context_blocks.append(f"{header}\n{e.get('text','')}")

    return "\n\n".join(context_blocks)


# -----------------------------
# Glossary-based answering
# -----------------------------

def normalize_lao_text(text: str) -> str:
    """
    Clean Lao text for accurate matching.
    Removes punctuation and extra spaces.
    """
    if not text:
        return ""

    # 1. Lowercase and trim surrounding whitespace
    text = text.lower().strip()

    # 2. Remove common punctuation and quote marks
    text = re.sub(r"""[?.!,;։:'"“”‘’]""", "", text)

    # 3. Collapse runs of whitespace into a single space
    text = re.sub(r"\s+", " ", text)
    
    return text.strip()
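
# Quick checks (the ASCII example shows case-folding; Lao has no letter case):
#     normalize_lao_text("  Science?!  ")  -> "science"
#     normalize_lao_text("ນ້ຳ ແມ່ນ ຫຍັງ?") -> "ນ້ຳ ແມ່ນ ຫຍັງ"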

def answer_from_glossary(message: str) -> Optional[str]:
    """
    Try to answer using the glossary index.
    Tier 1: Exact/Substring match (Sorted by Length to fix overlap bugs).
    Tier 2: Vector embedding match (Fallback).
    """
    if not getattr(qa_store, "GLOSSARY", None):
        return None

    norm_msg = normalize_lao_text(message)
    
    # Tier 1: exact / substring matching.
    # Sort glossary terms by length (longest first) so that a longer term such
    # as "ນັກວິທະຍາສາດ" (scientist) matches before its substring
    # "ວິທະຍາສາດ" (science).
    sorted_glossary = sorted(
        qa_store.GLOSSARY,
        key=lambda x: len(normalize_lao_text(x.get("term", ""))),
        reverse=True,
    )

    for item in sorted_glossary:
        term_raw = item.get("term", "")
        norm_term = normalize_lao_text(term_raw)
        
        if not norm_term:
            continue

        # Condition A: exact match, e.g. the user types only "ນັກວິທະຍາສາດ".
        is_exact = (norm_msg == norm_term)

        # Condition B: substring match for short questions,
        # e.g. "ນັກວິທະຍາສາດ ແມ່ນຫຍັງ" ("what is a scientist?").
        # The length cap keeps a short term from matching inside a long,
        # unrelated message.
        is_substring = (norm_term in norm_msg) and (len(norm_msg) < len(norm_term) + 20)

        if is_exact or is_substring:
            definition = item.get("definition", "").strip()
            example = item.get("example", "").strip()
            
            # Return the result immediately once the longest match is found
            if example:
                return f"{definition} ຕົວຢ່າງ: {example}"
            return definition
            
    # Tier 2: fall back to vector similarity when no text match was found.
    if qa_store.GLOSSARY_EMBEDDINGS is None:
        return None

    q_emb = embed_model.encode(
        [message],
        convert_to_numpy=True,
        normalize_embeddings=True,
    )[0]

    sims = np.dot(qa_store.GLOSSARY_EMBEDDINGS, q_emb)
    best_idx = int(np.argmax(sims))
    best_sim = float(sims[best_idx])

    # Reject weak matches below a cosine-similarity threshold of 0.65
    if best_sim < 0.65:
        return None

    item = qa_store.GLOSSARY[best_idx]
    definition = item.get("definition", "").strip()
    example = item.get("example", "").strip()

    if example:
        return f"{definition} ຕົວຢ່າງ: {example}"
    return definition
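
# Example behaviour (assumes "ວິທະຍາສາດ" is a term in qa_store.GLOSSARY):
#     answer_from_glossary("ວິທະຍາສາດ ແມ່ນຫຍັງ")   -> its stored definition
#     answer_from_glossary("completely unrelated text") -> None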


# -----------------------------
# Prompt + LLM generation
# -----------------------------
def build_prompt(question: str, history: Optional[List] = None) -> str:
    context = retrieve_context(question, max_entries=MAX_CONTEXT_ENTRIES)
    history_block = _format_history(history)

    return f"""{SYSTEM_PROMPT}

{history_block}ຂໍ້ມູນອ້າງອີງ:
{context}

ຄຳຖາມ: {question}

ຄຳຕອບດ້ວຍພາສາລາວ:"""


def generate_answer(question: str, history: Optional[List] = None) -> str:
    prompt = build_prompt(question, history)
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=160,
            do_sample=False,
        )

    generated_ids = outputs[0][inputs["input_ids"].shape[1]:]
    answer = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()

    # Enforce 2–3 sentence answers for students
    sentences = re.split(r"(?<=[\.?!…])\s+", answer)
    short_answer = " ".join(sentences[:3]).strip()
    return short_answer if short_answer else answer
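
# The sentence trim in action (ASCII example):
#     re.split(r"(?<=[\.?!…])\s+", "A. B. C. D.")[:3] -> ["A.", "B.", "C."]
# so at most the first three sentences are returned to the student.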


# -----------------------------
# QA lookup (exact + fuzzy)
# -----------------------------
def answer_from_qa(question: str) -> Optional[str]:
    """
    1) Exact match in QA_INDEX
    2) Fuzzy match via word overlap with ALL_QA_KNOWLEDGE
    """
    norm_q = qa_store.normalize_question(question)
    if not norm_q:
        return None

    # Exact match
    if norm_q in qa_store.QA_INDEX:
        return qa_store.QA_INDEX[norm_q]

    # Fuzzy match
    q_terms = [t for t in norm_q.split(" ") if len(t) > 1]
    if not q_terms:
        return None

    best_score = 0
    best_answer: Optional[str] = None

    for item in qa_store.ALL_QA_KNOWLEDGE:
        stored_terms = [t for t in item["norm_q"].split(" ") if len(t) > 1]
        overlap = sum(1 for t in q_terms if t in stored_terms)
        if overlap > best_score:
            best_score = overlap
            best_answer = item["a"]

    # require at least 2 overlapping words to accept fuzzy match
    if best_score >= 2 and best_answer is not None:
        # optional: log when fuzzy match is used
        print(f"[FUZZY MATCH] score={best_score} -> {best_answer[:50]!r}")
        return best_answer

    return None
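
# Worked example of the overlap score (hypothetical stored question):
#     q_terms      = ["ນ້ຳ", "ແມ່ນຫຍັງ"]
#     stored_terms = ["ນ້ຳ", "ຄື", "ຫຍັງ"]
# overlap = 1 (only "ນ້ຳ" is shared), below the acceptance threshold of 2,
# so this fuzzy match is rejected.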


# -----------------------------
# Main chatbot entry
# -----------------------------
def laos_science_bot(message: str, history: List) -> str:
    """
    Main chatbot function for Student tab (Gradio ChatInterface).
    """
    if not message.strip():
        return "ກະລຸນາພິມຄໍາຖາມກ່ອນ."

    # 0) Try glossary first for key concepts
    gloss = answer_from_glossary(message)
    if gloss:
        return gloss

    # 1) Then try exact / fuzzy Q&A lookup
    direct = answer_from_qa(message)
    if direct:
        return direct

    # 2) Fall back to LLM + retrieved context
    try:
        answer = generate_answer(message, history)
    except Exception as e:  # noqa: BLE001
        return f"ລະບົບມີບັນຫາ: {e}"

    return answer
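

# Minimal smoke test: `python model_utils.py` asks one question from the CLI.
# The sample question ("What is science?") is arbitrary; any Lao question works.
if __name__ == "__main__":
    print(laos_science_bot("ວິທະຍາສາດແມ່ນຫຍັງ?", history=[]))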