Skip to content

Commit 7ecbeaa

Browse files
Merge pull request #954 from MervinPraison/claude/issue-940-20250716-1247
fix: prevent premature termination in Ollama sequential tool execution
2 parents cc5e04a + 47cf9d4 commit 7ecbeaa

File tree

1 file changed

+21
-10
lines changed
  • src/praisonai-agents/praisonaiagents/llm/llm.py

1 file changed

+21
-10
lines changed

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 21 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,9 @@ class LLM:
9393
# Ollama-specific prompt constants
9494
OLLAMA_TOOL_USAGE_PROMPT = "Please analyze the request and use the available tools to help answer the question. Start by identifying what information you need."
9595
OLLAMA_FINAL_ANSWER_PROMPT = "Based on the tool results above, please provide the final answer to the original question."
96+
97+
# Ollama iteration threshold for summary generation
98+
OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3
9699

97100
def _log_llm_config(self, method_name: str, **config):
98101
"""Centralized debug logging for LLM configuration and parameters.
@@ -827,6 +830,7 @@ def get_response(
827830
iteration_count = 0
828831
final_response_text = ""
829832
stored_reasoning_content = None # Store reasoning content from tool execution
833+
accumulated_tool_results = [] # Store all tool results across iterations
830834

831835
while iteration_count < max_iterations:
832836
try:
@@ -1070,7 +1074,7 @@ def get_response(
10701074
})
10711075

10721076
should_continue = False
1073-
tool_results = [] # Store all tool results
1077+
tool_results = [] # Store current iteration tool results
10741078
for tool_call in tool_calls:
10751079
# Handle both object and dict access patterns
10761080
is_ollama = self._is_ollama_provider()
@@ -1084,6 +1088,7 @@ def get_response(
10841088
tool_result = execute_tool_fn(function_name, arguments)
10851089
logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
10861090
tool_results.append(tool_result) # Store the result
1091+
accumulated_tool_results.append(tool_result) # Accumulate across iterations
10871092

10881093
if verbose:
10891094
display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1127,10 +1132,12 @@ def get_response(
11271132
break
11281133

11291134
# Special handling for Ollama to prevent infinite loops
1130-
tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
1131-
if tool_summary:
1132-
final_response_text = tool_summary
1133-
break
1135+
# Only generate summary after multiple iterations to allow sequential execution
1136+
if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
1137+
tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
1138+
if tool_summary:
1139+
final_response_text = tool_summary
1140+
break
11341141

11351142
# Otherwise, continue the loop to check if more tools are needed
11361143
iteration_count += 1
@@ -1579,6 +1586,7 @@ async def get_response_async(
15791586
iteration_count = 0
15801587
final_response_text = ""
15811588
stored_reasoning_content = None # Store reasoning content from tool execution
1589+
accumulated_tool_results = [] # Store all tool results across iterations
15821590

15831591
while iteration_count < max_iterations:
15841592
response_text = ""
@@ -1749,7 +1757,7 @@ async def get_response_async(
17491757
"tool_calls": serializable_tool_calls
17501758
})
17511759

1752-
tool_results = [] # Store all tool results
1760+
tool_results = [] # Store current iteration tool results
17531761
for tool_call in tool_calls:
17541762
# Handle both object and dict access patterns
17551763
is_ollama = self._is_ollama_provider()
@@ -1761,6 +1769,7 @@ async def get_response_async(
17611769

17621770
tool_result = await execute_tool_fn(function_name, arguments)
17631771
tool_results.append(tool_result) # Store the result
1772+
accumulated_tool_results.append(tool_result) # Accumulate across iterations
17641773

17651774
if verbose:
17661775
display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1894,10 +1903,12 @@ async def get_response_async(
18941903
break
18951904

18961905
# Special handling for Ollama to prevent infinite loops
1897-
tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
1898-
if tool_summary:
1899-
final_response_text = tool_summary
1900-
break
1906+
# Only generate summary after multiple iterations to allow sequential execution
1907+
if iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
1908+
tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
1909+
if tool_summary:
1910+
final_response_text = tool_summary
1911+
break
19011912

19021913
# Continue the loop to check if more tools are needed
19031914
iteration_count += 1

0 commit comments

Comments (0)