Spaces:
Runtime error
import os
import sys

import streamlit as st

# ─── Make the package's utils folder importable ─────────────────────
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
UTILS_DIR = os.path.join(BASE_DIR, "utils")
if UTILS_DIR not in sys.path:
    sys.path.insert(0, UTILS_DIR)

# ─── Ensure omniscientframework package is importable ────────────────
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
PACKAGE_PATH = os.path.abspath(os.path.join(ROOT_PATH, ".."))
if PACKAGE_PATH not in sys.path:
    sys.path.insert(0, PACKAGE_PATH)
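# The path arithmetic above implies a layout roughly like this (only
# omniscientframework/ and utils/backend.py are confirmed by the imports below;
# the remaining names are assumptions for illustration):
#
#   <project root>/                 <- PACKAGE_PATH (added to sys.path)
#       omniscientframework/        <- BASE_DIR == ROOT_PATH
#           utils/                  <- UTILS_DIR (added to sys.path)
#               backend.py          <- defines run_llm
#           pages/                  <- assumed Streamlit multipage folder
#               <this page>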
# ─── Import project utilities ────────────────────────────────────────
from omniscientframework.utils.backend import run_llm

# ─── Page Setup ───────────────────────────────────────────────────────
st.title("🧪 Example Page with Chatbot")
st.write("This demo chatbot also ingests Omnieye + Omnilog outputs.")

# ─── Initialize Session State ─────────────────────────────────────────
if "example_chat" not in st.session_state:
    st.session_state.example_chat = []
# ─── Collect context from Omnieye + Omnilog ───────────────────────────
system_context = []

if "omnieye_output" in st.session_state:
    preview = st.session_state.omnieye_output.get("file_preview", "")
    matches = st.session_state.omnieye_output.get("matches", [])
    if preview:
        system_context.append(f"Omnieye preview:\n{preview}")
    if matches:
        system_context.append("Keyword matches:\n" + "\n".join(matches))

if "omnilog_output" in st.session_state:
    normalized = st.session_state.omnilog_output.get("normalized_preview", "")
    matches = st.session_state.omnilog_output.get("matches", [])
    if normalized:
        system_context.append(f"Omnilog preview:\n{normalized}")
    if matches:
        system_context.append("Log matches:\n" + "\n".join(matches))
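# Assumed shape of the session-state entries read above; the keys come from the
# .get() calls, while the values are purely illustrative, not real Omnieye/Omnilog output:
#   st.session_state.omnieye_output = {"file_preview": "...", "matches": ["...", "..."]}
#   st.session_state.omnilog_output = {"normalized_preview": "...", "matches": ["..."]}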
# ─── Display Chat History ─────────────────────────────────────────────
for msg in st.session_state.example_chat:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# ─── Chat Input ───────────────────────────────────────────────────────
if prompt := st.chat_input("Ask the Example Chatbot about files or logs..."):
    st.session_state.example_chat.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Build context-aware input
    ai_input = "\n\n".join(system_context + [prompt])
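    # Illustrative result of the join above (values hypothetical): each context
    # block, then the user question, separated by blank lines, e.g.
    #   "Omnieye preview:\n...\n\nKeyword matches:\n...\n\n<user question>"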
    # Generate AI response
    try:
        ai_reply = run_llm(ai_input)
    except Exception as e:
        ai_reply = f"⚠️ Error running LLM: {e}"

    with st.chat_message("assistant"):
        st.markdown(ai_reply)
    st.session_state.example_chat.append({"role": "assistant", "content": ai_reply})
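The page imports run_llm from omniscientframework.utils.backend, which is not shown here. To exercise the chat UI locally without the real backend, a minimal stand-in could look like the sketch below; only the function name and its single-string signature are taken from the call above, and the echo body is an assumption, not the project's actual implementation.

# omniscientframework/utils/backend.py — hypothetical local stub, not the real backend
def run_llm(prompt: str) -> str:
    # Echo the combined context + question so the chat flow can be tested offline.
    return f"(stub reply) I received:\n\n{prompt}"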