I am building an MCP server using the Gemini and Tavily code, and I hit this issue when running the tool through the MCP Inspector:
"Error executing tool run_with_tavily: Query is missing."
I wonder if the error comes from one of the agents itself?
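
To narrow it down, a minimal guard at the top of the tool can fail fast on a missing argument (my sketch, assuming the "Query is missing." message means an empty query string eventually reached Tavily):

    # Hypothetical guard for the start of run_with_tavily:
    # if it never fires but the error persists, the empty query is being
    # produced inside the STORM pipeline, not by the MCP layer.
    if not topic or not topic.strip():
        raise ValueError("run_with_tavily received an empty 'topic' argument")
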
This is how I define the tool:
import os

# Allow a duplicated OpenMP runtime to load (common libomp crash workaround)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

from dotenv import load_dotenv
from mcp.server.fastmcp import FastMCP
from knowledge_storm import (
    STORMWikiRunnerArguments,
    STORMWikiRunner,
    STORMWikiLMConfigs,
)
from knowledge_storm.lm import GoogleModel
from knowledge_storm.rm import TavilySearchRM
mcp = FastMCP(
    name="deep_research_agent",
    host="0.0.0.0",
    port=8050,
)


@mcp.tool(
    name="run_with_tavily",
    description=(
        "Researches a topic using Tavily, generates outlines and articles with Gemini models, "
        "then polishes and saves results to the specified directory."
    ),
)
def run_with_tavily(
    topic: str,
    output_dir: str = "./results/gemini",
    max_conv_turn: int = 3,
    max_perspective: int = 3,
    search_top_k: int = 3,
    max_thread_num: int = 3,
    do_research: bool = True,
    do_generate_outline: bool = True,
    do_generate_article: bool = True,
    do_polish_article: bool = True,
) -> dict:
"""
Args:
output_dir: Directory to store generated artifacts
max_conv_turn: Number of conversational turns for researcher
max_perspective: Max perspectives per turn
search_top_k: Top-K results to retrieve from Tavily
max_thread_num: Parallel threads for retrieval
do_research: Whether to perform initial research
do_generate_outline: Whether to generate an outline
do_generate_article: Whether to write the article
do_polish_article: Whether to polish the article
Returns:
Summary dictionary containing output path and basic status
"""
    # Load API keys from .env
    load_dotenv()

    gemini_kwargs = {
        "api_key": os.getenv("GOOGLE_API_KEY"),
        "temperature": 1.0,
        "top_p": 0.9,
    }

    # Configure language models
    lm_configs = STORMWikiLMConfigs()
    lm_configs.set_conv_simulator_lm(
        GoogleModel("models/gemini-2.0-flash-lite", max_tokens=500, **gemini_kwargs)
    )
    lm_configs.set_question_asker_lm(
        GoogleModel("models/gemini-1.5-flash", max_tokens=500, **gemini_kwargs)
    )
    lm_configs.set_outline_gen_lm(
        GoogleModel("models/gemini-2.0-flash", max_tokens=400, **gemini_kwargs)
    )
    lm_configs.set_article_gen_lm(
        GoogleModel("models/gemini-2.0-flash-lite", max_tokens=700, **gemini_kwargs)
    )
    lm_configs.set_article_polish_lm(
        GoogleModel("models/gemini-2.0-flash-lite", max_tokens=4000, **gemini_kwargs)
    )

    # Runner arguments
    engine_args = STORMWikiRunnerArguments(
        output_dir=output_dir,
        max_conv_turn=max_conv_turn,
        max_perspective=max_perspective,
        search_top_k=search_top_k,
        max_thread_num=max_thread_num,
    )

    # Initialize Tavily retriever
    rm = TavilySearchRM(
        tavily_search_api_key=os.getenv("TAVILY_API_KEY"),
        k=search_top_k,
        include_raw_content=True,
    )
    # Create the runner and execute the requested pipeline stages
    runner = STORMWikiRunner(engine_args, lm_configs, rm)
    runner.run(
        topic=topic,
        do_research=do_research,
        do_generate_outline=do_generate_outline,
        do_generate_article=do_generate_article,
        do_polish_article=do_polish_article,
    )
    runner.post_run()
    runner.summary()
    # Return output information for the agent
    return {
        "status": "completed",
        "output_dir": output_dir,
        "topic": topic,
        "research": do_research,
        "outline": do_generate_outline,
        "article_generated": do_generate_article,
        "article_polished": do_polish_article,
    }
if __name__ == "__main__":
    transport = "stdio"
    if transport == "stdio":
        print("Running server with stdio transport")
        mcp.run(transport="stdio")
    elif transport == "sse":
        print("Running server with SSE transport")
        mcp.run(transport="sse")
    else:
        raise ValueError(f"Unknown transport: {transport}")
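
To check whether the retriever itself is the source, the same Tavily setup can also be exercised outside MCP entirely (a sketch; I am assuming TavilySearchRM follows the dspy-style retriever interface, where the instance is callable with a query string):

    import os
    from dotenv import load_dotenv
    from knowledge_storm.rm import TavilySearchRM

    load_dotenv()
    rm = TavilySearchRM(
        tavily_search_api_key=os.getenv("TAVILY_API_KEY"),
        k=3,
        include_raw_content=True,
    )
    # If this direct call also fails with "Query is missing.", the problem is
    # in the retriever or API-key setup rather than the MCP tool wiring.
    print(rm("large language models"))

One more note on the stdio transport: the server's stdout is the JSON-RPC channel, so the print(...) before mcp.run(...) may confuse the Inspector; writing such messages to stderr (print(..., file=sys.stderr)) is safer.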