Implement AI Detective functionality with spectator mode and voice integration
Files changed:
- TODO.md +3 -3
- app.py +72 -14
- game/ai_detective.py +103 -0
- game/game_engine.py +55 -0
- game/llm_manager.py +12 -0
- prompts/detective_player.txt +58 -0
- ui/static/js/game_logic.js +90 -1
- ui/templates/game_interface.html +16 -0
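
For orientation, a rough sketch of the spectator-mode round trip these files implement. It is not code from the commit; the wrapper function is illustrative, but `run_ai_step`, `decide_next_move`, and the `ai_step_result` payload shape all appear in the diffs below.

    # Illustrative only: condensed view of the "ai_step" round trip.
    # The JS loop (runAIStep in game_logic.js) posts an "ai_step" action over the
    # Gradio bridge; app.py forwards it to the game engine and returns the payload
    # that the AI Detective log panel renders.
    def spectator_round_trip(session):
        step = session.game.run_ai_step()      # game_engine.py: one detective turn
        # run_ai_step() -> AIDetective.decide_next_move() -> Gemini -> parsed JSON,
        # then use_tool / question_suspect / make_accusation depending on "action".
        return {"action": "ai_step_result", "data": step}  # shape runAIStep() expects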
TODO.md
CHANGED
@@ -8,13 +8,13 @@
 - pre-generate suspects cartoony potraits
 - bottom gradio box to see internal MCP tools calls out/in
 - eleven labs tts/voice
-
-
-# Doing
 - add ai
 - mode selection
 - numbered steps to launch game
 
+
+# Doing
+
 - sound/music
 - video embeded
 - divider

app.py
CHANGED
@@ -57,9 +57,13 @@ class GameSession:
     def __init__(self):
         self.session_id = None
         self.game = None
+        self.voice_enabled = False
+        self.game_mode = "interactive"
 
-    def start(self, difficulty="medium"):
+    def start(self, difficulty="medium", mode="interactive", voice=True):
        self.session_id, self.game = game_engine.start_game(difficulty)
+        self.voice_enabled = voice
+        self.game_mode = mode
         return self._get_init_data()
 
     def _get_init_data(self):
@@ -81,7 +85,8 @@ class GameSession:
                 "points": self.game.points,
                 "available_cameras": cameras,
                 "dna_map": dna_map,
-                "unlocked_evidence": self.game.unlocked_evidence
+                "unlocked_evidence": self.game.unlocked_evidence,
+                "mode": self.game_mode
             }
         }
 
@@ -106,6 +111,46 @@ class GameSession:
         if not self.game:
             return None
 
+        if action == "ai_step":
+            step_data = self.game.run_ai_step()
+
+            # Format result for frontend
+            # We need to map the AI's internal result to frontend actions
+            actions = []
+
+            # 1. Thought Bubble
+            actions.append({
+                "action": "ai_thought",
+                "data": {"thought": step_data["thought"]}
+            })
+
+            res = step_data["result"]
+            res_type = step_data["action"]  # use_tool, chat, accuse
+
+            if res_type == "use_tool":
+                # We need to construct the same evidence payload as handle_input
+                tool_name = step_data["result"].get("location")  # Wait, result of use_tool is the raw dict
+                # I need the tool name from decision
+                # Re-running logic here is messy.
+                # Better: run_ai_step should return enough info.
+                # It returns "result" which is the output of use_tool.
+                # But we don't know which tool it was easily unless we parse "action" or store it.
+
+                # In run_ai_step I did:
+                # tool_name = decision.get("tool_name")
+                # kwargs = decision.get("args", {})
+
+                # Let's update run_ai_step to include tool_name in result wrapper?
+                # No, let's just infer or update run_ai_step.
+                # Actually, the loop in JS will call this.
+                # I'll handle formatting here.
+                pass
+
+            return {
+                "action": "ai_step_result",
+                "data": step_data
+            }
+
         if action == "select_suspect":
             return None
 
@@ -133,7 +178,7 @@ class GameSession:
         response = self.game.question_suspect(suspect_id, message)
 
         suspect = next((s for s in self.game.scenario["suspects"] if s["id"] == suspect_id), None)
-        suspect_name =
+        suspect_name = next((s["name"] for s in self.game.scenario["suspects"] if s["id"] == suspect_id), "Suspect")
 
         # Clean text for ElevenLabs
         cleaned_response = response
@@ -151,7 +196,8 @@ class GameSession:
 
         # Generate Audio
         audio_b64 = None
-
+        # Check Voice Enabled
+        if self.voice_enabled and suspect and "voice_id" in suspect and cleaned_response:
             audio_bytes = self.game.voice_manager.generate_audio(cleaned_response, suspect["voice_id"])
             if audio_bytes:
                 audio_b64 = "data:audio/mpeg;base64," + base64.b64encode(audio_bytes).decode('utf-8')
@@ -161,7 +207,7 @@ class GameSession:
             "data": {
                 "role": "suspect",
                 "name": suspect_name,
-                "content": response,
+                "content": response,
                 "audio": audio_b64
             }
         }
@@ -379,12 +425,14 @@ def get_game_iframe():
     """
     return iframe
 
-def start_game_from_ui(case_name):
+def start_game_from_ui(case_name, mode, voice):
     difficulty = "medium"
     if "Coffee" in case_name: difficulty = "easy"
     if "Gallery" in case_name: difficulty = "hard"
 
-
+    mode_slug = "spectator" if "Spectator" in mode else "interactive"
+
+    init_data = session.start(difficulty, mode_slug, voice)
 
     # Return visible updates
     return (
@@ -407,16 +455,25 @@ footer { display: none !important; }
 with gr.Blocks(title="Murder.Ai", fill_height=True) as demo:
     gr.HTML(f"<style>{css}</style>")
 
-    #
-    with gr.Row(elem_id="
+    # --- Initial Setup Screen ---
+    with gr.Row(elem_id="setup-container", visible=True) as setup_col:
         with gr.Column():
-            gr.Markdown("# 🕵️ MURDER.AI
+            gr.Markdown("# 🕵️ MURDER.AI")
+
+            gr.Markdown("### 1. Select Case File")
             case_dropdown = gr.Dropdown(
                 choices=["The Silicon Valley Incident (Medium)", "The Coffee Shop Murder (Easy)", "The Gallery Heist (Hard)"],
                 value="The Silicon Valley Incident (Medium)",
-
+                show_label=False
             )
-
+
+            gr.Markdown("### 2. Game Configuration")
+            with gr.Row():
+                game_mode = gr.Radio(["Interactive", "AI Spectator (Beta)"], value="Interactive", label="Game Mode")
+                voice_toggle = gr.Checkbox(value=True, label="Enable Voice (ElevenLabs)")
+
+            gr.Markdown("### 3. Investigation")
+            start_btn = gr.Button("π OPEN CASE FILE", variant="primary", size="lg")
 
     # Game Frame (Hidden Initially)
     with gr.Group(visible=False, elem_id="game-frame-container") as game_group:
@@ -424,6 +481,7 @@ with gr.Blocks(title="Murder.Ai", fill_height=True) as demo:
 
     bridge_input = gr.Textbox(elem_id="bridge-input", visible=True)
     bridge_output = gr.Textbox(elem_id="bridge-output", visible=True)
+    log_input = gr.Textbox(elem_id="log-input", visible=True)  # Input from JS for logs
 
     # Log Box
     with gr.Accordion("System Logs (MCP Traffic)", open=False):
@@ -450,8 +508,8 @@ with gr.Blocks(title="Murder.Ai", fill_height=True) as demo:
     # Start Game Event
     start_btn.click(
         fn=start_game_from_ui,
-        inputs=[case_dropdown],
-        outputs=[
+        inputs=[case_dropdown, game_mode, voice_toggle],
+        outputs=[setup_col, game_group, bridge_output]
     )
 
     # Bridge Logic with Logging (Legacy/Fallback)

game/ai_detective.py
ADDED
@@ -0,0 +1,103 @@
+import json
+import re
+from .llm_manager import LLMManager
+
+class AIDetective:
+    def __init__(self, game_instance):
+        self.game = game_instance
+        self.llm = LLMManager()
+        # We reuse the LLMManager but we need to register the new prompt role dynamically
+        # or just load it manually.
+        self.prompt_template = self._load_prompt()
+        self.history = []
+        self.memory = []  # Store structured past actions
+
+    def _load_prompt(self):
+        try:
+            with open("prompts/detective_player.txt", "r") as f:
+                return f.read()
+        except:
+            return "Error loading prompt."
+
+    def record_result(self, action_type, result):
+        """Records the outcome of an action to memory."""
+        entry = f"Action: {action_type}\nResult: {json.dumps(result)}"
+        self.memory.append(entry)
+        # Keep prompt focused (last 5 turns)
+        if len(self.memory) > 5:
+            self.memory.pop(0)
+
+    def decide_next_move(self):
+        """
+        Analyzes game state and returns a JSON action.
+        """
+        # 1. Construct Context
+        evidence_list = [f"{e.get('title', e.get('info', 'Evidence'))}: {e.get('html_content', e.get('description', str(e)))}" for e in self.game.evidence_revealed]
+        evidence_summary = "; ".join(evidence_list) if evidence_list else "None"
+
+        suspect_status = []
+        suspect_phones = []
+        for s in self.game.scenario["suspects"]:
+            status = "Active"
+            if s["id"] in self.game.eliminated_suspects:
+                status = "Eliminated"
+            suspect_status.append(f"{s['name']} ({s['id']}): {status}")
+            suspect_phones.append(f"{s['name']}: {s.get('phone_number', 'Unknown')}")
+
+        history_str = "\n---\n".join(self.memory) if self.memory else "No previous actions."
+
+        # Tool Options
+        cameras = list(self.game.scenario["evidence"]["footage_data"].keys())
+        unlocked_items = self.game.unlocked_evidence if self.game.unlocked_evidence else ["None"]
+
+        prompt = self.prompt_template.format(
+            victim_name=self.game.scenario["victim"]["name"],
+            time_of_death=self.game.scenario["victim"]["time_of_death"],
+            location=self.game.scenario["victim"].get("location", "Unknown"),
+            round=self.game.round,
+            points=self.game.points,
+            evidence_summary=evidence_summary,
+            suspect_status="\n".join(suspect_status),
+            history=history_str,
+            # Tool Hints
+            cameras=", ".join(cameras),
+            suspect_phones="\n".join(suspect_phones),
+            unlocked_items=", ".join(unlocked_items)
+        )
+
+        # 2. Call LLM
+        # We treat this as a one-shot or maintain simple history
+        # Using a unique agent ID for the detective player
+        agent_id = "ai_detective_player"
+
+        # We need to inject the prompt into the agent creation if it doesn't exist
+        if agent_id not in self.llm.agents:
+            # We cheat a bit and use 'detective' role but override instructions in the message
+            # because LLMManager expects a fixed role.
+            # Actually, let's just pass the full prompt as the "user input" to a generic agent
+            # or create a custom agent.
+            pass
+
+        # Direct generation using the underlying model logic would be cleaner,
+        # but we'll use the existing abstraction.
+        # We will send the ENTIRE prompt as the message.
+        response_text = self.llm.get_response_raw(prompt)
+
+        # 3. Parse JSON
+        try:
+            # Extract JSON from code blocks if present
+            json_match = re.search(r"\{.*\}", response_text, re.DOTALL)
+            if json_match:
+                response_text = json_match.group(0)
+
+            action = json.loads(response_text)
+            return action
+        except Exception as e:
+            print(f"AI Detective Error: {e} | Response: {response_text}")
+            # Fallback action
+            return {
+                "thought": "I am confused. I will check the footage.",
+                "action": "use_tool",
+                "tool_name": "get_footage",
+                "args": {"location": "10th_floor_camera"}
+            }

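A minimal standalone check of the JSON-extraction fallback used in `decide_next_move` above. The sample model reply is invented for illustration; the regex and parsing mirror the lines in the diff.

    import json
    import re

    # A chatty model reply that wraps the decision JSON in extra prose (hypothetical).
    sample_reply = (
        'Sure, here is my decision:\n'
        '{"thought": "Verify the alibi.", "action": "chat", '
        '"suspect_id": "suspect_2", "message": "Where were you at 21:00?"}\n'
        'Let me know what happens.'
    )

    # Same strategy as decide_next_move: grab the outermost {...} span, then json.loads it.
    match = re.search(r"\{.*\}", sample_reply, re.DOTALL)
    decision = json.loads(match.group(0)) if match else None
    print(decision["action"], decision["suspect_id"])  # -> chat suspect_2
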
game/game_engine.py
CHANGED
@@ -2,6 +2,7 @@ import uuid
 from .scenario_generator import generate_crime_scenario
 from .llm_manager import LLMManager
 from .voice_manager import VoiceManager
+from .ai_detective import AIDetective
 from mcp import tools
 
 class GameInstance:
@@ -10,6 +11,8 @@ class GameInstance:
         self.scenario = generate_crime_scenario(difficulty)
         self.llm_manager = LLMManager()
         self.voice_manager = VoiceManager()
+        self.ai_detective = None  # Initialized later to avoid circular dep issues if any, or just now.
+
         self.round = 1
         self.max_rounds = 3  # 3 Chances
         self.points = 10
@@ -22,7 +25,54 @@ class GameInstance:
 
         # Initialize Agents
         self._init_agents()
+        self.ai_detective = AIDetective(self)
+
+    def run_ai_step(self):
+        """Executes one turn for the AI Detective."""
+        if self.game_over:
+            return {"thought": "Game Over.", "action": "none"}
+
+        decision = self.ai_detective.decide_next_move()
+        action_type = decision.get("action")
+        thought = decision.get("thought", "Thinking...")
+        result = {}
 
+        if action_type == "use_tool":
+            tool_name = decision.get("tool_name")
+            kwargs = decision.get("args", {})
+            result = self.use_tool(tool_name, **kwargs)
+            result["type"] = "evidence"
+
+        elif action_type == "chat":
+            suspect_id = decision.get("suspect_id")
+            msg = decision.get("message")
+            response = self.question_suspect(suspect_id, msg)
+            result = {
+                "type": "chat",
+                "suspect_id": suspect_id,
+                "question": msg,
+                "response": response
+            }
+
+        elif action_type == "accuse":
+            suspect_id = decision.get("suspect_id")
+            # Map suspect_id if it's just "suspect_1" (which it is)
+            # But make_accusation handles ID.
+            outcome = self.make_accusation(suspect_id)
+            result = {
+                "type": "game_over",
+                "outcome": outcome
+            }
+
+        # Record result for AI memory
+        self.ai_detective.record_result(action_type, result)
+
+        return {
+            "thought": thought,
+            "action": action_type,
+            "result": result
+        }
+
     def _init_agents(self):
         # 1. Detective
         detective_context = {
@@ -119,6 +169,11 @@
             return result  # Don't deduct points for errors
 
         self.points -= cost
+
+        # Inject input context for AI memory
+        if isinstance(result, dict):
+            result["_input_args"] = kwargs
+
         self.evidence_revealed.append(result)
         self.log_event("System", f"Used {tool_name}. Cost: {cost} pts. Result: {str(result)}")
         return result

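As a usage sketch, a headless loop that drives `run_ai_step` the way the spectator frontend does. It assumes app.py's `game_engine` import resolves to `game.game_engine`; the 20-step cap is an arbitrary safety limit, not something in the commit.

    # Hypothetical headless spectator: mirrors the runAIStep loop without the UI.
    from game import game_engine  # assumption: same module app.py imports as game_engine

    session_id, game = game_engine.start_game("medium")

    for _ in range(20):  # arbitrary cap so the loop always terminates
        step = game.run_ai_step()
        print("THOUGHT:", step["thought"])
        print("ACTION :", step["action"])
        result = step.get("result", {})
        if result.get("type") == "game_over":
            print("OUTCOME:", result["outcome"])
            break
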
game/llm_manager.py
CHANGED
@@ -93,3 +93,15 @@ class LLMManager:
         if agent:
             return agent.generate_response(user_input)
         return "Error: Agent not found."
+
+    def get_response_raw(self, prompt):
+        """Stateless generation for AI Detective logic."""
+        if not API_KEY:
+            return '{"thought": "Mock thought", "action": "chat", "suspect_id": "suspect_1", "message": "Hello"}'
+
+        try:
+            model = genai.GenerativeModel('gemini-2.5-flash')
+            response = model.generate_content(prompt)
+            return response.text
+        except Exception as e:
+            return f"Error: {str(e)}"

prompts/detective_player.txt
ADDED
@@ -0,0 +1,58 @@
+You are the Lead AI Detective in a murder mystery game.
+Your goal is to solve the case by finding the murderer among the 4 suspects.
+
+CASE DETAILS:
+Victim: {victim_name}
+Time of Death: {time_of_death}
+Location: {location}
+
+CURRENT STATE:
+Round: {round}/3
+Points: {points}
+Evidence Found: {evidence_summary}
+Suspects Status: {suspect_status}
+
+MY PREVIOUS ACTIONS:
+{history}
+
+AVAILABLE TOOLS (Costs Points):
+1. get_location(phone_number) [2 pts]: Check where a suspect was at specific times.
+   - Valid Phones:
+{suspect_phones}
+
+2. call_alibi(alibi_id, question) [1 pt]: Call a suspect's alibi witness.
+   - Ask the suspect for their 'Alibi ID' first!
+
+3. get_footage(camera_name) [3 pts]: Watch security footage. Unlocks physical evidence.
+   - Valid Cameras: {cameras}
+
+4. get_dna_test(evidence_id) [4 pts]: Test DNA on unlocked items.
+   - Unlocked Items: {unlocked_items}
+
+5. interrogate(suspect_id, question) [0 pts]: Chat with a suspect.
+6. accuse(suspect_id) [0 pts]: Accuse a suspect. WARNING: Wrong accusation advances round. 3 strikes = LOSS.
+
+STRATEGY:
+- Start by gathering general evidence (Footage) or questioning suspects to get their stories.
+- Cross-reference stories with Location data.
+- Use DNA to confirm presence at the crime scene.
+- If a suspect lies about their location or alibi, press them.
+- Only ACCUSE if you are 80% sure or running out of points.
+
+INSTRUCTIONS:
+Analyze the current state and decide your next move.
+Provide a "Thought" (your internal reasoning) and an "Action" (the tool/chat to execute).
+
+OUTPUT FORMAT (JSON ONLY):
+{{
+  "thought": "I need to verify Suspect 1's alibi...",
+  "action": "use_tool" OR "chat" OR "accuse",
+  "tool_name": "call_alibi" (if action is use_tool),
+  "args": {{
+    "alibi_id": "...",
+    "question": "..."
+  }}
+  OR
+  "suspect_id": "suspect_1" (if action is chat or accuse),
+  "message": "Where were you..." (if action is chat)
+}}

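To make the OR branches in the OUTPUT FORMAT concrete, here are three illustrative decisions the parser in ai_detective.py would accept (one per action type). The camera argument reuses the fallback example from ai_detective.py; the other IDs and wording are placeholders, not real game data.

    {"thought": "Check who was near the scene.", "action": "use_tool", "tool_name": "get_footage", "args": {"location": "10th_floor_camera"}}
    {"thought": "Press the assistant on the timeline.", "action": "chat", "suspect_id": "suspect_2", "message": "Where were you at the time of death?"}
    {"thought": "The evidence points one way.", "action": "accuse", "suspect_id": "suspect_3"}
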
ui/static/js/game_logic.js
CHANGED
@@ -146,6 +146,91 @@ function initializeGame(data) {
     gameState.unlockedEvidence = data.unlocked_evidence || [];
 
     renderCaseFile(data.scenario);
+
+    // Mode Check
+    if (data.mode === 'spectator') {
+        document.getElementById('spectator-modal').classList.add('active');
+        document.getElementById('spectator-start-btn').onclick = startSpectatorMode;
+
+        // Disable user controls
+        document.getElementById('chat-input-area').style.display = 'none';
+        document.getElementById('tools-panel').style.pointerEvents = 'none';
+        document.getElementById('tools-panel').style.opacity = '0.5';
+    }
+}
+
+// --- AI Spectator Logic ---
+
+function startSpectatorMode() {
+    document.getElementById('spectator-modal').classList.remove('active');
+    document.getElementById('ai-log-panel').style.display = 'block';
+    runAIStep();
+}
+
+async function runAIStep() {
+    const logContent = document.getElementById('ai-log-content');
+
+    // Visual "Thinking" state
+    const thinkingDiv = document.createElement('div');
+    thinkingDiv.style.color = '#888';
+    thinkingDiv.style.fontStyle = 'italic';
+    thinkingDiv.innerText = "Analyzing evidence...";
+    thinkingDiv.id = 'temp-thinking';
+    logContent.appendChild(thinkingDiv);
+    logContent.scrollTop = logContent.scrollHeight;
+
+    // 1. Request Move
+    const response = await sendAction('ai_step', {});
+
+    // Remove temp thinking
+    const temp = document.getElementById('temp-thinking');
+    if (temp) temp.remove();
+
+    if (!response || response.action !== 'ai_step_result') {
+        return;
+    }
+
+    const step = response.data;
+
+    // 2. Log Thought
+    const entry = document.createElement('div');
+    entry.style.marginBottom = '15px';
+    entry.style.borderBottom = '1px dashed #333';
+    entry.style.paddingBottom = '10px';
+    entry.innerHTML = `
+        <div style="color:#aaa; margin-bottom:5px;">🤖 <strong>THOUGHT:</strong></div>
+        <div style="margin-bottom:10px;">${step.thought}</div>
+        <div style="color:#aaa; margin-bottom:5px;">⚡ <strong>ACTION:</strong> ${step.action.toUpperCase()}</div>
+    `;
+    logContent.appendChild(entry);
+    logContent.scrollTop = logContent.scrollHeight;
+
+    // 3. Execute Action Visualization
+    if (step.action === 'chat') {
+        // Select suspect if needed
+        if (gameState.currentSuspect !== step.result.suspect_id) {
+            selectSuspect(step.result.suspect_id);
+        }
+
+        // Simulate User Message (AI Detective)
+        addChatMessage('detective', step.result.question, "AI DETECTIVE");
+
+        setTimeout(() => {
+            addChatMessage('suspect', step.result.response, "Suspect");
+        }, 1000);
+
+    } else if (step.action === 'use_tool') {
+        showNotification(`🤖 AI USED TOOL`);
+    }
+
+    // Loop
+    if (step.result.type !== 'game_over' && !step.result.outcome) {
+        setTimeout(runAIStep, 6000); // 6s delay for reading
+    } else {
+        if (step.result.type === 'game_over') {
+            triggerGameOver(step.result.outcome);
+        }
+    }
 }
 
 function renderCaseFile(scenario) {
@@ -741,7 +826,11 @@ function showModalResult(data) {
         resultDiv.style.marginTop = '20px';
         resultDiv.style.borderTop = '2px dashed var(--ink-color)';
         resultDiv.style.paddingTop = '10px';
-        document.querySelector('
+        const modalContent = document.querySelector('#tool-modal .modal-content');
+        const modalActions = document.querySelector('#tool-modal .modal-actions');
+        if (modalContent && modalActions) {
+            modalContent.insertBefore(resultDiv, modalActions);
+        }
     }
 
     resultDiv.style.display = 'block';

ui/templates/game_interface.html
CHANGED
@@ -14,6 +14,22 @@
         <div>INITIALIZING CASE FILE...</div>
     </div>
 
+    <!-- AI Spectator Start Modal -->
+    <div id="spectator-modal" class="modal-overlay">
+        <div class="modal-content" style="text-align: center;">
+            <div class="modal-header">AI DETECTIVE MODE</div>
+            <p style="margin: 20px 0;">You are now watching an AI Agent investigate this case.</p>
+            <p style="font-size: 0.9rem; color: #666; margin-bottom: 20px;">It will gather evidence, interview suspects, and make an accusation.</p>
+            <button id="spectator-start-btn" class="modal-btn confirm" style="font-size: 1.2rem; width: 100%;">START INVESTIGATION</button>
+        </div>
+    </div>
+
+    <!-- AI Log Panel -->
+    <div id="ai-log-panel" style="display: none; grid-row: 2/3; grid-column: 2/3; background: rgba(0,0,0,0.95); padding: 20px; border: 2px solid #0f0; overflow-y: auto; font-family: 'Courier New', monospace; color: #0f0; z-index: 50; margin: 10px;">
+        <div style="font-weight: bold; margin-bottom: 10px; border-bottom: 1px solid #0f0; padding-bottom: 5px;">🤖 AI DETECTIVE LOG</div>
+        <div id="ai-log-content"></div>
+    </div>
+
     <div id="game-container">
 
         <!-- Header -->