Commit 2ee9679 by gabriel-melki (parent: 77c169c)

Reorganize project structure: move files to src/ directory and remove old files
Files changed:
- .gitattributes +0 -35
- .gitignore +1 -0
- agent.py +0 -51
- app.py +0 -43
- prompt.py +0 -46
- src/eval/submission.py +13 -5
- src/tools/audio_processing_tools.py +1 -1
- src/ui/builder.py +0 -1
- submission.py +0 -198
- tools/audio_processing_tools.py +0 -11
- tools/file_tools.py +0 -60
- tools/image_processing_tools.py +0 -59
- tools/wikipedia_tools.py +0 -323
- tools/youtube_tools.py +0 -112
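The three src/ paths above are the new homes of the evaluation, audio-tool, and UI modules. As a quick post-move sanity check (illustrative only, not part of the commit), one could confirm they exist:

import os

# Paths taken from the file list above; this snippet is a hypothetical helper, not commit content.
for rel in ("src/eval/submission.py", "src/tools/audio_processing_tools.py", "src/ui/builder.py"):
    print(rel, "->", "present" if os.path.isfile(rel) else "missing")
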
.gitattributes
DELETED
@@ -1,35 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text

.gitignore
CHANGED
@@ -1,5 +1,6 @@
 # Python
 __pycache__/
+.gradio/
 *.py[cod]
 *$py.class
 *.so

agent.py
DELETED
@@ -1,51 +0,0 @@
-import os
-import glob
-from smolagents import CodeAgent
-from prompt import get_prompt
-
-class QuestionAnsweringAgent(CodeAgent):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def get_current_files(self):
-        """Get a set of all files in the current working directory"""
-        try:
-            # Get all files in current directory (including hidden files)
-            all_files = set()
-            for pattern in ['*', '.*']:
-                all_files.update(glob.glob(pattern))
-            # Filter to only include actual files (not directories)
-            files = {f for f in all_files if os.path.isfile(f)}
-            return files
-        except Exception as e:
-            print(f"Error getting current files: {e}")
-            return set()
-
-    def cleanup_created_files(self, files_before):
-        """Remove files that were created during execution"""
-        try:
-            files_after = self.get_current_files()
-            newly_created_files = files_after - files_before
-
-            for file_path in newly_created_files:
-                try:
-                    if os.path.exists(file_path):
-                        os.remove(file_path)
-                        print(f"Cleaned up file: {file_path}")
-                except Exception as e:
-                    print(f"Error cleaning up file {file_path}: {e}")
-
-        except Exception as e:
-            print(f"Error during cleanup: {e}")
-
-    def __call__(self, question_text, file_name) -> str:
-        # Take snapshot of files before execution
-        files_before = self.get_current_files()
-
-        try:
-            enhanced_question = get_prompt(question_text, file_name)
-            response = self.run(enhanced_question, reset=True)
-            return response
-        finally:
-            # Always clean up files, even if there's an exception
-            self.cleanup_created_files(files_before)

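The deleted class relies on a simple snapshot-and-cleanup pattern around each run. A minimal standalone sketch of that pattern (same glob/os logic as get_current_files and cleanup_created_files, outside the agent class):

import glob
import os

def snapshot_files():
    # Regular files in the current directory, hidden files included (mirrors get_current_files).
    return {f for pattern in ("*", ".*") for f in glob.glob(pattern) if os.path.isfile(f)}

files_before = snapshot_files()
# ... run work that may create temporary files ...
for created in snapshot_files() - files_before:
    os.remove(created)  # same rule as cleanup_created_files: delete anything newly created
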
app.py
DELETED
@@ -1,43 +0,0 @@
-from smolagents import (
-    InferenceClientModel,
-    FinalAnswerTool
-)
-
-from tools.wikipedia_tools import wikipedia_summary, read_wikipedia_page
-from tools.file_tools import read_file_as_text
-from tools.youtube_tools import download_youtube_url_images, download_youtube_url_audio
-from tools.image_processing_tools import ask_question_about_image
-
-from agent import QuestionAnsweringAgent
-
-from submission import build_gradio_interface
-
-model = InferenceClientModel(
-    provider="auto",
-    model_id="Qwen/Qwen3-Coder-30B-A3B-Instruct",
-    temperature=0,
-    top_p=1.0,
-    seed=42
-)
-
-agent_tools = [
-    FinalAnswerTool(),
-    wikipedia_summary, read_wikipedia_page,
-    read_file_as_text,
-    download_youtube_url_images, download_youtube_url_audio,
-    ask_question_about_image
-]
-
-agent = QuestionAnsweringAgent(
-    name="question_answering_expert",
-    model=model,
-    tools=agent_tools,
-    add_base_tools=True,
-    planning_interval=None,
-    additional_authorized_imports=["os", "bs4", "PIL", "transformers", "torch", "requests", "glob"],
-    max_steps=10,
-    verbosity_level=2,  # For better debugging
-)
-
-if __name__ == "__main__":
-    build_gradio_interface(agent)

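For reference, the deleted entry point only ever invoked the agent through the Gradio app; a direct call would follow the __call__ signature defined in the deleted agent.py (question text plus a file name). A hedged sketch with a made-up question, assuming the agent object built above:

# Hypothetical direct invocation, bypassing build_gradio_interface:
answer = agent("What is the first studio album listed on the page?", "")  # empty file name when no file is attached
print(answer)
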
prompt.py
DELETED
@@ -1,46 +0,0 @@
-def get_prompt(question_text, file_name):
-    PROMPT = f"""
-    You are a highly precise question-answering agent.
-    When given a question:
-    - If necessary, start by performing a wikipedia search using the `wikipedia_summary` tool to find possible sources of information.
-    For the `query` parameter of the `wikipedia_summary` tool, you MUST think about the entity being searched for and ALWAYS pass exactly the entity name (person/place/event) with no qualifiers.
-    NEVER include words like: career, albums, list, biography, years, numbers, prepositions, or date ranges.
-    Examples:
-    - "Lionel Messi career" → use: wikipedia_summary("Lionel Messi")
-    - "Battle of Hastings timeline" → use: wikipedia_summary("Battle of Hastings")
-    - "Population of Paris in 2010" → use: wikipedia_summary("Paris")
-    - If necessary, visit the wikipedia page listed in the wikipedia summary tool to read the full content. You will find the page url in the output of the wikipedia summary tool at the end after the **Read more:** section. Use the `read_wikipedia_page` tool to visit the page.
-    - When using the `read_wikipedia_page` tool, you may find tables in the page. To analyze the tables, please use a code snippet to read the tables into a pandas dataframe and analyze the data.
-    - If necessary, download a youtube video using the `download_youtube_url_audio` or `download_youtube_url_images` tool to find possible sources of information. For the parameter `num_images`, use a large number if you need to have comprehensive information about the video.
-    - If necessary, analyze the audio or images downloaded from youtube using the `ask_question_about_image` tool to find possible sources of information.
-    - If necessary, perform a web search using the `web_search` tool to find possible sources of information.
-    - If necessary, please analyze the images downloaded using the `ask_question_about_image` tool to find possible sources of information.
-    - If the web search only returns titles and short snippets, you MUST visit the actual webpage using the `visit_webpage` tool to read the full content before answering.
-    - If the task requires reading, listening, or analyzing a file, you must use the file specified after the question, NOT the file name mentioned casually inside the question text.
-    - Comma separated lists MUST contain a single space after each comma.
-    - If you are asked for a number, don't use comma to write your number, nor use units such as $ or percent sign unless specified otherwise.
-    - If you are asked for a string, don't use articles, nor abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
-    - If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
-    - Only answer after you have gathered enough information by reading the actual page contents.
-    - Only answer after you have printed out the final answer first.
-    - Once you have obtained the final answer, you MUST make a code call as follows:
-    <code>
-    final_answer("your_answer")
-    </code>
-    to submit the final answer.
-    - Do not retry or execute anything else after calling `final_answer`. STOP IMMEDIATELY.
-    - Calling `final_answer` terminates the task completely. No further steps are needed.
-    - The function `final_answer` must wrap the exact printed value.
-    - Provide ONLY the precise answer requested.
-    - Do not include explanations, steps, reasoning, or additional text when calling `final_answer`.
-    - Be direct and specific. The GAIA benchmark requires exactly matching answers.
-
-    Based on the above guidelines, answer the following question:
-    -- beginning of question --
-    {question_text}
-    -- end of question --
-
-    IMPORTANT: If the question mentions the need to use a file, the file name is provided below.
-    file_name: {file_name}"""
-
-    return PROMPT

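A quick check of what the deleted helper produced (assumes get_prompt as defined above is importable; the question and file name here are made up):

prompt = get_prompt("How many studio albums are listed?", "discography.xlsx")
print("discography.xlsx" in prompt)             # True: the file name is appended after the question
print("-- beginning of question --" in prompt)  # True: the question is wrapped in explicit markers
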
src/eval/submission.py
CHANGED
@@ -1,4 +1,3 @@
-import os
 import gradio as gr
 import requests
 import pandas as pd
@@ -6,7 +5,7 @@ import numpy as np
 
 
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-SELECTED_QUESTIONS = [3]
+SELECTED_QUESTIONS = None  #[3]
 
 def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
     """
@@ -24,6 +23,9 @@ def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
     questions_url = f"{api_url}/questions"
     submit_url = f"{api_url}/submit"
 
+    # 0. Define Agent Code
+    agent_code = f"https://huggingface.co/spaces/{profile.username}/tree/main"
+
     # 1. Fetch Questions
     print(f"Fetching questions from: {questions_url}")
     try:
@@ -50,7 +52,13 @@ def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
     answers_payload = []
     is_correct_answers = []
     print(f"Running agent on {len(questions_data)} questions...")
-    for item in np.array(questions_data).take(SELECTED_QUESTIONS):
+
+    selected_questions_data = (
+        np.array(questions_data).take(SELECTED_QUESTIONS)
+        if SELECTED_QUESTIONS
+        else questions_data
+    )
+    for item in selected_questions_data:
         task_id = item.get("task_id")
         question_text = item.get("question")
         file_name = item.get("file_name")
@@ -66,7 +74,7 @@ def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
         individual_submission_data = {
             "username": username.strip(),
-            "agent_code": agent_code,
+            "agent_code": agent_code.strip(),
             "answers": [{"task_id": task_id, "submitted_answer": submitted_answer}]
         }
 
@@ -79,7 +87,7 @@ def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
     # 3. Prepare Submission
-    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+    submission_data = {"username": username.strip(), "agent_code": agent_code.strip(), "answers": answers_payload}
     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
     print(status_update)
 

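The net effect of this change: SELECTED_QUESTIONS = None now runs every fetched question, while a list of indices still narrows the run for debugging. A standalone sketch of the new selection expression with made-up question data:

import numpy as np

questions_data = [{"task_id": t} for t in ("q0", "q1", "q2", "q3")]  # made-up stand-ins

for SELECTED_QUESTIONS in (None, [3]):
    selected = (
        np.array(questions_data).take(SELECTED_QUESTIONS)
        if SELECTED_QUESTIONS
        else questions_data
    )
    print(SELECTED_QUESTIONS, [item.get("task_id") for item in selected])
# None -> all four stand-ins; [3] -> only the item at index 3
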
src/tools/audio_processing_tools.py
CHANGED
@@ -1,6 +1,6 @@
 from whisper import load_model
 from smolagents.tools import tool
-
+# TODO: Add a tool to ask a question about an audio
 @tool
 def ask_question_about_audio(question: str, path_to_audio: str) -> str:
     """

src/ui/builder.py
CHANGED
@@ -4,7 +4,6 @@ import re
 import shutil
 from typing import Optional
 import gradio as gr
-from functools import partial
 
 from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
 from smolagents.agents import ActionStep, MultiStepAgent

submission.py
DELETED
@@ -1,198 +0,0 @@
-import os
-import gradio as gr
-import requests
-import pandas as pd
-import numpy as np
-
-
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-SELECTED_QUESTIONS = [3]
-def run_and_submit_all(agent, profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
-    # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code
-
-    if profile:
-        username = f"{profile.username}"
-        print(f"User logged in: {username}")
-    else:
-        print("User not logged in.")
-        return "Please Login to Hugging Face with the button.", None
-
-    api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
-
-    # 1. Instantiate Agent ( modify this part to create your agent)
-    try:
-        agent = agent
-    except Exception as e:
-        print(f"Error instantiating agent: {e}")
-        return f"Error initializing agent: {e}", None
-    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
-    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-    print(agent_code)
-
-    # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
-    try:
-        response = requests.get(questions_url, timeout=15)
-        response.raise_for_status()
-        questions_data = response.json()
-        if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
-        print(f"Fetched {len(questions_data)} questions.")
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching questions: {e}")
-        return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
-    except Exception as e:
-        print(f"An unexpected error occurred fetching questions: {e}")
-        return f"An unexpected error occurred fetching questions: {e}", None
-
-    # 3. Run your Agent
-    results_log = []
-    answers_payload = []
-    is_correct_answers = []
-    print(f"Running agent on {len(questions_data)} questions...")
-    for item in np.array(questions_data).take(SELECTED_QUESTIONS):
-        task_id = item.get("task_id")
-        question_text = item.get("question")
-        file_name = item.get("file_name")
-        if not task_id or question_text is None:
-            print(f"Skipping item with missing task_id or question: {item}")
-            continue
-        try:
-            submitted_answer = agent(question_text, file_name)
-            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-        except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-        individual_submission_data = {
-            "username": username.strip(),
-            "agent_code": agent_code,
-            "answers": [{"task_id": task_id, "submitted_answer": submitted_answer}]
-        }
-
-        individual_response = requests.post(submit_url, json=individual_submission_data, timeout=60)
-        individual_response.raise_for_status()
-        individual_result_data = individual_response.json()
-        is_correct_answers.append(True if individual_result_data.get("correct_count", 0) == 1 else False)
-    if not answers_payload:
-        print("Agent did not produce any answers to submit.")
-        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-    # 4. Prepare Submission
-    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-    print(status_update)
-
-    # 5. Submit
-    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-    try:
-        response = requests.post(submit_url, json=submission_data, timeout=60)
-        response.raise_for_status()
-        result_data = response.json()
-        final_status = (
-            f"Submission Successful!\n"
-            f"User: {result_data.get('username')}\n"
-            f"Overall Score: {result_data.get('score', 'N/A')}% "
-            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-            f"Message: {result_data.get('message', 'No message received.')}"
-        )
-        print("Submission successful.")
-        results_df = pd.DataFrame(results_log)
-        results_df["Is Correct"] = is_correct_answers
-        return final_status, results_df
-    except requests.exceptions.HTTPError as e:
-        error_detail = f"Server responded with status {e.response.status_code}."
-        try:
-            error_json = e.response.json()
-            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-        except requests.exceptions.JSONDecodeError:
-            error_detail += f" Response: {e.response.text[:500]}"
-        status_message = f"Submission Failed: {error_detail}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        results_df["Is Correct"] = is_correct_answers
-        return status_message, results_df
-    except requests.exceptions.Timeout:
-        status_message = "Submission Failed: The request timed out."
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-    except requests.exceptions.RequestException as e:
-        status_message = f"Submission Failed: Network error - {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-    except Exception as e:
-        status_message = f"An unexpected error occurred during submission: {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
-
-
-def build_gradio_interface(agent):
-    # --- Build Gradio Interface using Blocks ---
-    with gr.Blocks() as demo:
-        gr.Markdown("# Basic Agent Evaluation Runner")
-        gr.Markdown(
-            """
-            **Instructions:**
-            1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-            2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-            3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-            ---
-            **Disclaimers:**
-            Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-            This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-            """
-        )
-
-        gr.LoginButton()
-
-        run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-        status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-        # Removed max_rows=10 from DataFrame constructor
-        results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-        def run_with_login_state(profile: gr.OAuthProfile):
-            return run_and_submit_all(agent, profile)
-
-        run_button.click(
-            fn=run_with_login_state,
-            outputs=[status_output, results_table]
-        )
-
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
-    # Check for SPACE_HOST and SPACE_ID at startup for information
-    space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup
-
-    if space_host_startup:
-        print(f"✅ SPACE_HOST found: {space_host_startup}")
-        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
-    else:
-        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-    if space_id_startup:  # Print repo URLs if SPACE_ID is found
-        print(f"✅ SPACE_ID found: {space_id_startup}")
-        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-    else:
-        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-    print("-"*(60 + len(" App Starting ")) + "\n")
-
-    print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)

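The success branch above expects the scoring endpoint to return username, score, correct_count, total_attempted, and message fields. A tiny sketch of how the status string is assembled (field names come from the result_data.get(...) calls above; the values are made up):

result_data = {"username": "demo-user", "score": 50.0, "correct_count": 1, "total_attempted": 2}
print(
    f"Overall Score: {result_data.get('score', 'N/A')}% "
    f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)"
)
# -> Overall Score: 50.0% (1/2 correct)
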
tools/audio_processing_tools.py
DELETED
@@ -1,11 +0,0 @@
-from whisper import load_model
-from smolagents.tools import tool
-
-@tool
-def ask_question_about_audio(question: str, path_to_audio: str) -> str:
-    """
-    Ask a question about an audio and return the answer.
-    """
-    model = load_model("base")
-    res = model.transcribe(path_to_audio)
-    return res["text"]

tools/file_tools.py
DELETED
@@ -1,60 +0,0 @@
-
-import json
-import csv
-import openpyxl
-import whisper
-import os
-import requests
-from smolagents.tools import tool
-
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-def _download_file(file_name: str) -> None:
-    if not os.path.exists(file_name):
-        url = f"{DEFAULT_API_URL}/files/{file_name.split('.')[-2]}"
-        r = requests.get(url)
-        with open(file_name, "wb") as f:
-            f.write(r.content)
-
-@tool
-def read_file_as_text(file_name: str) -> str:
-    """
-    Opens a file and returns its content as readable text.
-    Supports 'txt', 'json', 'csv', 'xlsx', and 'mp3' (for mp3, it transcribes speech to text).
-    Args:
-        file_name (str): The path or name of the file.
-    Returns:
-        str: The content of the file as text, or transcribed speech if 'mp3'.
-    """
-    _download_file(file_name)
-    file_type = file_name.split(".")[-1]
-    try:
-        if file_type in {"txt", "py"}:
-            with open(file_name, "r", encoding="utf-8") as f:
-                return f.read()
-        elif file_type == "json":
-            with open(file_name, "r", encoding="utf-8") as f:
-                data = json.load(f)
-                return json.dumps(data, indent=2)
-        elif file_type == "csv":
-            with open(file_name, "r", encoding="utf-8") as f:
-                reader = csv.reader(f)
-                rows = list(reader)
-                return "\n".join([", ".join(row) for row in rows])
-        elif file_type == "xlsx":
-            wb = openpyxl.load_workbook(file_name, data_only=True)
-            sheet = wb.active
-            content = []
-            for row in sheet.iter_rows(values_only=True):
-                content.append(", ".join(str(cell) if cell is not None else "" for cell in row))
-            return "\n".join(content)
-        elif file_type == "mp3":
-            w = whisper.load_model("base")
-            res = w.transcribe(file_name)
-            return res["text"]
-        else:
-            return f"File type '{file_type}' not supported."
-    except FileNotFoundError:
-        return f"File '{file_name}' not found."
-    except Exception as e:
-        return f"Error opening file '{file_name}': {str(e)}"

tools/image_processing_tools.py
DELETED
@@ -1,59 +0,0 @@
-from PIL import Image
-from transformers import BlipProcessor, BlipForQuestionAnswering
-from smolagents.tools import tool
-import torch
-import requests
-import os
-
-DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
-def _download_file(file_name: str) -> None:
-    """Download file if it doesn't exist locally"""
-    try:
-        # Try to open the file to check if it exists
-        with open(file_name, 'rb') as f:
-            pass  # File exists, do nothing
-    except FileNotFoundError:
-        # File doesn't exist, download it
-        url = f"{DEFAULT_API_URL}/files/{file_name.split('.')[-2]}"
-        r = requests.get(url)
-        with open(file_name, "wb") as f:
-            f.write(r.content)
-
-@tool
-def ask_question_about_image(question: str, path_to_image: str) -> str:
-    """
-    Ask a question about an image and return the answer.
-    Args:
-        question: the question to ask about the image.
-        path_to_image: The path to the image to ask the question about.
-    Returns:
-        A string with the answer to the question.
-    """
-    # Download the file if it doesn't exist
-    _download_file(path_to_image)
-
-    # Check if CUDA is available and use GPU if possible, otherwise use CPU
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-    # Load the processor and model (using BLIP for more stable VQA)
-    processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
-    model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
-    model = model.to(device)
-
-    # Load and process the image
-    image = Image.open(path_to_image).convert('RGB')
-
-    # Process the inputs
-    inputs = processor(image, question, return_tensors="pt")
-    inputs = {k: v.to(device) for k, v in inputs.items()}
-
-    # Generate the answer
-    with torch.no_grad():
-        outputs = model.generate(**inputs, max_length=50, num_beams=5)
-
-    # Decode and return the answer
-    answer = processor.decode(outputs[0], skip_special_tokens=True)
-
-    return answer
-

tools/wikipedia_tools.py
DELETED
@@ -1,323 +0,0 @@
-import os
-import requests
-from io import StringIO
-import pandas as pd
-from bs4 import BeautifulSoup
-from smolagents.tools import tool
-import wikipediaapi
-
-
-def fetch_wikipedia_page(url: str) -> str:
-    """Fetch raw HTML of a Wikipedia page."""
-    headers = {
-        "User-Agent": "GAIA_benchmark_agent/1.0 (contact: [email protected])",
-        "Accept-Language": "en-US,en;q=0.9",
-    }
-    resp = requests.get(url, headers=headers, timeout=50)
-    resp.raise_for_status()
-    return resp.text
-
-
-def _normalize_title(value: str) -> str:
-    """Lowercase, collapse whitespace for robust title comparisons."""
-    return " ".join(value.lower().split()) if isinstance(value, str) else ""
-
-
-def _remove_sections_by_titles(soup: BeautifulSoup, titles: list[str]) -> None:
-    """Remove sections (header + content until next header of same/higher level) whose
-    header text matches any of `titles` (case-insensitive). Mutates `soup` in-place.
-    """
-    if not titles:
-        return
-    excluded = {_normalize_title(t) for t in titles}
-    header_tags = ["h1", "h2", "h3", "h4", "h5", "h6"]
-
-    # Find all headers that match excluded titles
-    headers_to_remove = []
-    for header in soup.find_all(header_tags):
-        title_text = _normalize_title(header.get_text(" ", strip=True))
-        if title_text in excluded:
-            headers_to_remove.append(header)
-
-    # Remove each matching section (header + content)
-    for header in headers_to_remove:
-        # Skip if header was already removed as part of another section
-        if not header.parent:
-            continue
-
-        level = int(header.name[1])
-
-        # Determine the container to remove - could be the header itself or its parent wrapper
-        header_container = header
-        # If header is wrapped in a heading container (like div.mw-heading), use that as the starting point
-        if (header.parent and
-                header.parent.name == 'div' and
-                header.parent.get('class') and
-                any('heading' in cls.lower() for cls in header.parent.get('class', []))):
-            header_container = header.parent
-
-        nodes_to_remove = [header_container]
-
-        # Collect all content after the header container until next header of same/higher level
-        current = header_container
-        while current.next_sibling:
-            current = current.next_sibling
-            sib_name = getattr(current, "name", None)
-
-            # If we hit another header (directly or within a heading container), check its level
-            next_header = None
-            if sib_name in header_tags:
-                next_header = current
-            elif (sib_name == 'div' and
-                    current.get('class') and
-                    any('heading' in cls.lower() for cls in current.get('class', []))):
-                # This is a heading container, find the header inside it
-                for child in current.find_all(header_tags):
-                    next_header = child
-                    break
-
-            if next_header:
-                next_level = int(next_header.name[1])
-                if next_level <= level:
-                    # This is a header of same or higher level - stop here
-                    break
-
-            # Add this node to removal list
-            nodes_to_remove.append(current)
-
-        # Remove all collected nodes
-        for node in nodes_to_remove:
-            try:
-                node.decompose()
-            except Exception:
-                try:
-                    node.extract()
-                except Exception:
-                    pass
-
-
-def _cleanup_non_content(root: BeautifulSoup) -> None:
-    """Remove Wikipedia UI/maintenance blocks from the main content area."""
-    selectors = [
-        "div#toc",
-        "div.toc",
-        "div.hatnote",
-        "div.shortdescription",
-        "div.reflist",
-        "ol.references",
-        "div.navbox",
-        "table.navbox",
-        "table.vertical-navbox",
-        "table.sidebar",
-        "table.ambox",
-        "table.metadata",
-        "div#catlinks",
-        "div.mw-authority-control",
-        "div.printfooter",
-        "div.portal",
-        "table.infobox",  # avoid dumping infobox into text
-    ]
-    for sel in selectors:
-        for el in root.select(sel):
-            try:
-                el.decompose()
-            except Exception:
-                try:
-                    el.extract()
-                except Exception:
-                    pass
-
-
-def extract_text(soup: BeautifulSoup) -> str:
-    """Extract main text (paragraphs + headers + lists) from article body only, preserving document order.
-    Excludes content that's inside tables and excludes headers that are also used as
-    table names (either as <caption> or the nearest previous header) to avoid duplication
-    with extract_tables."""
-    content_root = soup.select_one("div.mw-parser-output") or soup
-
-    for elem in content_root(["script", "style", "sup", "aside", "nav"]):
-        elem.decompose()
-    _cleanup_non_content(content_root)
-
-    # Identify table names (from captions or nearest previous headers) to avoid duplicating them in text
-    table_names_normalized = set()
-    for table in content_root.find_all("table"):
-        # Skip non-content tables (same logic as extract_tables)
-        classes = table.get("class", [])
-        if isinstance(classes, list) and any(
-            c.lower() in {"navbox", "vertical-navbox", "sidebar", "mbox", "metadata"}
-            for c in classes
-        ):
-            continue
-
-        name_text = None
-        caption_el = table.find("caption")
-        if caption_el:
-            caption_text = caption_el.get_text(" ", strip=True)
-            if caption_text:
-                name_text = caption_text
-            else:
-                # Empty caption: treat as no caption and fallback to previous header
-                prev_header = table.find_previous(["h1", "h2", "h3", "h4", "h5", "h6"])
-                if prev_header:
-                    name_text = prev_header.get_text(" ", strip=True)
-        else:
-            prev_header = table.find_previous(["h1", "h2", "h3", "h4", "h5", "h6"])
-            if prev_header:
-                name_text = prev_header.get_text(" ", strip=True)
-
-        if not name_text and isinstance(classes, list) and any(c.lower() == "infobox" for c in classes):
-            name_text = "Infobox"
-
-        if name_text:
-            table_names_normalized.add(_normalize_title(name_text))
-
-    # Find all text elements in document order, but exclude duplicates
-    text_elements = []
-    for element in content_root.find_all(["h1", "h2", "h3", "h4", "h5", "h6", "p", "li"]):
-        # Skip elements that are inside a table (to avoid duplication with extract_tables)
-        if element.find_parent("table"):
-            continue
-
-        # Skip headers that match any table name (to avoid duplication with extract_tables)
-        if element.name in {"h1", "h2", "h3", "h4", "h5", "h6"}:
-            header_text_norm = _normalize_title(element.get_text(" ", strip=True))
-            if header_text_norm in table_names_normalized:
-                continue
-
-        # Skip list items that are exactly a table name (common for inline mini-TOCs within sections)
-        if element.name == "li":
-            li_text_norm = _normalize_title(element.get_text(" ", strip=True))
-            if li_text_norm in table_names_normalized:
-                continue
-
-        text = element.get_text(" ", strip=True)
-        if text:  # Only include non-empty text
-            text_elements.append(text)
-
-    return "\n\n".join(text_elements)
-
-
-def extract_tables(soup: BeautifulSoup) -> list[dict]:
-    """Extract all HTML tables as dicts: {name, df}."""
-    content_root = soup.select_one("div.mw-parser-output") or soup
-
-    tables = []
-    for table_idx, table in enumerate(content_root.find_all("table")):
-        # Skip non-content tables (navboxes, sidebars, etc.)
-        classes = table.get("class", [])
-        if isinstance(classes, list) and any(
-            c.lower() in {"navbox", "vertical-navbox", "sidebar", "mbox", "metadata"}
-            for c in classes
-        ):
-            continue
-
-        # Prefer explicit <caption>
-        caption_el = table.find("caption")
-        name = caption_el.get_text(" ", strip=True) if caption_el else None
-
-        # Fallback: nearest previous section header
-        if not name:
-            prev_header = table.find_previous(["h1", "h2", "h3", "h4", "h5", "h6"])
-            if prev_header:
-                name = prev_header.get_text(" ", strip=True)
-
-        # Fallback: class-based hints (e.g., infobox)
-        if not name:
-            if isinstance(classes, list) and any(c.lower() == "infobox" for c in classes):
-                name = "Infobox"
-
-        # Final fallback
-        if not name:
-            name = f"Table {table_idx + 1}"
-
-        try:
-            dfs = pd.read_html(StringIO(str(table)))
-            if len(dfs) == 1:
-                tables.append({"name": name, "df": dfs[0]})
-            else:
-                for part_idx, df in enumerate(dfs, start=1):
-                    tables.append({"name": f"{name} (part {part_idx})", "df": df})
-        except ValueError:
-            continue
-    return tables
-
-
-def format_for_llm(text: str, tables: list[dict], sections_to_exclude: list[str]) -> str:
-    """Combine text + tables into a single string for LLM input."""
-    output = []
-    output.append("=== ARTICLE TEXT ===\n")
-    output.append(text)
-
-    excluded = {_normalize_title(s) for s in sections_to_exclude}
-    filtered_tables = [
-        t for t in tables if _normalize_title(t.get("name", "")) not in excluded
-    ]
-
-    for i, t in enumerate(filtered_tables, start=1):
-        tname = t.get("name") or f"Table {i}"
-        df = t["df"]
-        output.append(f"\n\n=== TABLE {i}: {tname} ===\n")
-        output.append(df.to_markdown(index=False))
-
-    return "\n".join(output)
-
-
-@tool
-def wikipedia_summary(entity: str) -> dict:
-    """
-    Search Wikipedia for a query and return a dictionary with the summary of the page and the url of the page.
-    Args:
-        entity: the entity being searched for and ALWAYS pass exactly the entity name (person/place/event/concept) with no qualifiers.
-    Returns:
-        A dictionary with the summary of the page and the url of the page.
-    """
-    import wikipedia
-    summary_tool = wikipediaapi.Wikipedia(
-        user_agent=f"My research agent ({os.getenv('USER_EMAIL')})",
-    )
-    page = summary_tool.page(entity)
-    if not page.exists():
-        raise ValueError(f"No Wikipedia page found for '{entity}'. Try a different query.")
-    sections = [section._title for section in page.sections]
-    return {
-        "summary": f'''The sections inside the page are {", ".join(sections)} and the summary of the page is {page.summary}
-        ''',
-        "url": wikipedia.page(pageid=page.pageid).url
-    }
-
-
-@tool
-def read_wikipedia_page(
-    url: str,
-    sections_to_exclude: list[str] = [
-        "External links",
-        "References",
-        "Further reading",
-        "See also",
-        "Notes",
-    ]) -> str:
-    """
-    Read a Wikipedia page and return a string with the text of the page.
-    Args:
-        url: The URL of the Wikipedia page to read.
-        sections_to_exclude: A list of sections to exclude from the page.
-    Returns:
-        A string with the text of the page.
-    """
-    if "https://en.wikipedia.org/wiki/" not in url:
-        raise ValueError("URL is required")
-    # Fetch the page
-    html = fetch_wikipedia_page(url)
-    # Parse the page
-    soup = BeautifulSoup(html, "html.parser")
-    # Remove unwanted sections
-    _remove_sections_by_titles(soup, sections_to_exclude)
-
-    # Extract after pruning unwanted sections
-    text = extract_text(soup)
-    tables = extract_tables(soup)
-
-    # Combine
-    llm_ready = format_for_llm(text, tables, sections_to_exclude)
-    return llm_ready

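The deduplication between extract_text and extract_tables hinges on _normalize_title treating headings case- and whitespace-insensitively. A small standalone check of that helper as defined above:

def _normalize_title(value: str) -> str:
    # Same normalization rule as the deleted wikipedia_tools.py.
    return " ".join(value.lower().split()) if isinstance(value, str) else ""

print(_normalize_title("  External   Links "))  # -> "external links"
print(_normalize_title(None))                   # -> "" (non-strings collapse to empty)
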
tools/youtube_tools.py
DELETED
@@ -1,112 +0,0 @@
-
-import os
-import subprocess
-from yt_dlp import YoutubeDL
-
-from smolagents.tools import tool
-
-# Use FFmpeg to extract frames from the video
-def extract_frames_with_ffmpeg(video_path: str, num_frames: int) -> [str]:
-    """Extract frames from video using FFmpeg"""
-    if not os.path.exists(video_path):
-        raise FileNotFoundError(f"Video file not found: {video_path}")
-
-    # Get video duration using ffprobe
-    duration_cmd = [
-        'ffprobe', '-v', 'quiet', '-print_format', 'json',
-        '-show_format', video_path
-    ]
-
-    try:
-        result = subprocess.run(duration_cmd, capture_output=True, text=True, check=True)
-        import json
-        metadata = json.loads(result.stdout)
-        duration = float(metadata['format']['duration'])
-
-        # Calculate time intervals for frame extraction
-        time_intervals = [duration * i / (num_frames + 1) for i in range(1, num_frames + 1)]
-
-        extracted_files = []
-        for i, time_pos in enumerate(time_intervals):
-            output_filename = f"{os.path.splitext(os.path.basename(video_path))[0]}_frame_{i+1:03d}.jpg"
-
-            # Extract frame at specific time
-            ffmpeg_cmd = [
-                'ffmpeg', '-i', video_path, '-ss', str(time_pos),
-                '-vframes', '1', '-q:v', '2', '-y', output_filename
-            ]
-
-            subprocess.run(ffmpeg_cmd, capture_output=True, check=True)
-            extracted_files.append(output_filename)
-
-        return extracted_files
-
-    except subprocess.CalledProcessError as e:
-        print(f"Error running FFmpeg: {e}")
-        return []
-    except Exception as e:
-        print(f"Error: {e}")
-        return []
-
-
-@tool
-def download_youtube_url_audio(url: str) -> str:
-    """
-    Download a YouTube video using the url, extract the audio and return the path to the downloaded file.
-
-    Args:
-        url (str): The URL of the YouTube video to download.
-
-    Returns:
-        str: The path to the downloaded audio file.
-    """
-    ydl_audio_opts = {
-        'format': 'bestaudio/best',
-        'postprocessors': [{
-            'key': 'FFmpegExtractAudio',
-            'preferredcodec': 'mp3',
-            'preferredquality': '192',
-        }],
-        'quiet': True,
-        'no_verbose_header': True,
-        'no_warnings': True,
-    }
-
-    with YoutubeDL(ydl_audio_opts) as ydl:
-        file_path = ydl.extract_info(url)
-
-    return file_path['requested_downloads'][0]['filepath']
-
-
-@tool
-def download_youtube_url_images(url: str, num_images: int = 3) -> str:
-    """
-    Download a YouTube video using the url, extract the frames and return the path to the downloaded files.
-
-    Args:
-        url (str): The URL of the YouTube video to download.
-        num_images (int): The number of images to download. The images are extracted from the video at regular intervals.
-
-    Returns:
-        str: The different paths to the downloaded frames, separated by newlines.
-    """
-    # First, download the video
-    ydl_images_opts = {
-        'format': 'best[height<=720]',  # Download video in reasonable quality
-        'outtmpl': '%(title)s.%(ext)s',  # Save with title as filename
-        'quiet': True,
-        'no_verbose_header': True,
-        'no_warnings': True,
-    }
-
-    with YoutubeDL(ydl_images_opts) as ydl:
-        info = ydl.extract_info(url, download=True)
-        video_filepath = ydl.prepare_filename(info)
-
-    # Extract frames from the downloaded video
-    if os.path.exists(video_filepath):
-        extracted_frames = extract_frames_with_ffmpeg(video_filepath, num_images)
-        return "\n".join(extracted_frames)
-
-    return ""
-
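The frame sampling in extract_frames_with_ffmpeg is evenly spaced and deliberately skips the very start and end of the clip. The interval formula on its own, with made-up numbers:

duration, num_frames = 60.0, 3  # hypothetical 60-second video, 3 requested frames
print([duration * i / (num_frames + 1) for i in range(1, num_frames + 1)])
# -> [15.0, 30.0, 45.0]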