Skip to content

Commit b0cb425

Browse files
committed
Fix llm example tests
Nodes that raised errors are now included in results, recorded as errors
1 parent e0b5292 commit b0cb425

File tree

4 files changed

+32
-23
lines changed

4 files changed

+32
-23
lines changed

asimov/graph/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -535,6 +535,8 @@ async def _run_task(self, task: Task) -> None:
535535
for (i, _), result in zip(tasks, results):
536536
node_name = parallel_group["nodes"][i]
537537

538+
self.node_results[node_name] = result
539+
538540
if not self.is_success(result):
539541
dependent_chain = self.get_dependent_chains(node_name)
540542
failed_chains.add(dependent_chain)
@@ -549,8 +551,6 @@ async def _run_task(self, task: Task) -> None:
549551
)
550552
continue
551553

552-
self.node_results[node_name] = result
553-
554554
self.execution_state.mark_executed(
555555
self.execution_state.execution_index, i
556556
)

examples/llm_agent.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,11 @@ async def process(
8080
timeout=30.0,
8181
)
8282

83-
response_content = json.loads(response)["steps"]
83+
try:
84+
loaded_response = json.loads(response)
85+
response_content = loaded_response["steps"]
86+
except json.JSONDecodeError as e:
87+
return {"status": "error", "result": str(e)}
8488

8589
print(f"{self.name}: Received plan from LLM")
8690
except asyncio.TimeoutError:

tests/test_agent.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ async def test_error_handling(simple_agent, mock_cache):
266266
assert "NodeB" in result["failed_chains"][0]
267267
assert "Executed A" in str(result["result"]["NodeA"])
268268
assert "Executed C" in str(result["result"]["NodeC"])
269-
assert "NodeB" not in result["result"]
269+
assert "NodeB" in result["result"]
270270
assert "NodeD" not in result["result"]
271271
assert error["status"] == "error" and "NodeB" in error["node"]
272272

@@ -744,7 +744,7 @@ async def test_caching_results(simple_agent, mock_cache):
744744
result = await mock_cache.get_message(simple_agent.output_mailbox)
745745
assert "NodeA" in result["result"]
746746
assert "NodeB" in result["result"]
747-
assert "NodeC" not in result["result"]
747+
assert "NodeC" in result["result"]
748748

749749

750750
@pytest.mark.asyncio

tests/test_llm_agent.py

Lines changed: 23 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -20,26 +20,21 @@
2020
from examples.llm_agent import LLMPlannerModule, LLMExecutorModule, LLMFlowControlModule
2121

2222

23-
class MockAnthropicResponse:
24-
def __init__(self, content):
25-
self.choices = [MagicMock(message=MagicMock(content=content))]
26-
27-
2823
class MockAnthropicClient:
2924
"""Mock client for testing LLM interactions."""
3025

3126
def __init__(self, responses=None):
3227
self.responses = responses or {}
33-
self.default_response = MockAnthropicResponse("Default mock response")
28+
self.default_response = {}
3429

35-
async def get_generation(self, messages: List[Any]) -> MockAnthropicResponse:
30+
async def get_generation(self, messages: List[Any]) -> str:
3631
# Extract prompt from messages
3732
prompt = messages[-1].content if messages else ""
3833
# Return predefined response if available, otherwise default
3934
for key, response in self.responses.items():
4035
if key.lower() in prompt.lower():
41-
return MockAnthropicResponse(response)
42-
return self.default_response
36+
return response
37+
return json.dumps(self.default_response)
4338

4439

4540
@pytest.fixture(autouse=True)
@@ -50,6 +45,7 @@ def setup_env():
5045
if "ANTHROPIC_API_KEY" in os.environ:
5146
del os.environ["ANTHROPIC_API_KEY"]
5247

48+
5349
@pytest.fixture
5450
def mock_cache():
5551
return MockRedisCache()
@@ -59,7 +55,7 @@ def mock_cache():
5955
def mock_anthropic_client():
6056
return MockAnthropicClient(
6157
{
62-
"Create a step-by-step plan": '[{"description": "Research AI agents", "requirements": "Access to documentation", "validation": "Comprehensive notes available"}, {"description": "Write introduction", "requirements": "Research notes", "validation": "Clear introduction exists"}]',
58+
"Create a step-by-step plan": '{"steps": [{"description": "Research AI agents", "requirements": "Access to documentation", "validation": "Comprehensive notes available"}, {"description": "Write introduction", "requirements": "Research notes", "validation": "Clear introduction exists"}]}',
6359
"Execute this step": "Step executed successfully with the following results:\n1. Actions: Researched AI agents\n2. Outcome: Comprehensive notes created\n3. Output: 5 pages of detailed notes",
6460
"Evaluate if the step": "success - all validation criteria met",
6561
"Analyze the execution history": "Analysis complete. Decision: continue - execution is proceeding as expected",
@@ -145,7 +141,7 @@ async def test_llm_planning(llm_agent, mock_cache):
145141
assert "Plan created successfully" in result["results"][0]["result"]
146142

147143
# Verify plan was stored in cache
148-
plan = json.loads(await mock_cache.get("plan"))
144+
plan = await mock_cache.get("plan")
149145

150146
print(plan)
151147
assert isinstance(plan, list)
@@ -194,7 +190,7 @@ async def test_llm_execution(llm_agent, mock_cache):
194190
execution_history = await mock_cache.get("execution_history")
195191
assert execution_history is not None
196192
assert len(execution_history) == 1 # Should have one entry after first execution
197-
193+
198194
history_entry = execution_history[0]
199195
assert "step" in history_entry
200196
assert "execution_result" in history_entry
@@ -250,16 +246,23 @@ async def test_missing_api_key():
250246
"""Test that modules properly handle missing API key."""
251247
if "ANTHROPIC_API_KEY" in os.environ:
252248
del os.environ["ANTHROPIC_API_KEY"]
253-
254-
with pytest.raises(ValueError, match="ANTHROPIC_API_KEY environment variable must be set"):
249+
250+
with pytest.raises(
251+
ValueError, match="ANTHROPIC_API_KEY environment variable must be set"
252+
):
255253
LLMPlannerModule()
256-
257-
with pytest.raises(ValueError, match="ANTHROPIC_API_KEY environment variable must be set"):
254+
255+
with pytest.raises(
256+
ValueError, match="ANTHROPIC_API_KEY environment variable must be set"
257+
):
258258
LLMExecutorModule()
259-
260-
with pytest.raises(ValueError, match="ANTHROPIC_API_KEY environment variable must be set"):
259+
260+
with pytest.raises(
261+
ValueError, match="ANTHROPIC_API_KEY environment variable must be set"
262+
):
261263
LLMFlowControlModule()
262264

265+
263266
@pytest.mark.asyncio
264267
async def test_error_handling(llm_agent, mock_cache):
265268
"""Test error handling with problematic LLM responses."""
@@ -287,6 +290,8 @@ async def test_error_handling(llm_agent, mock_cache):
287290

288291
# Verify error handling in results
289292
planner_result = llm_agent.node_results.get("planner", {})
293+
294+
print(llm_agent.node_results)
290295
assert planner_result.get("status") in ["success", "error"]
291296

292297
# If planning succeeded despite invalid JSON, executor should handle the error

0 commit comments

Comments (0)