@@ -827,6 +827,7 @@ def get_response(
         iteration_count = 0
         final_response_text = ""
         stored_reasoning_content = None  # Store reasoning content from tool execution
+        accumulated_tool_results = []  # Store all tool results across iterations

         while iteration_count < max_iterations:
             try:
@@ -1052,7 +1053,7 @@ def get_response(
             })

             should_continue = False
-            tool_results = []  # Store all tool results
+            tool_results = []  # Store current iteration tool results
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 is_ollama = self._is_ollama_provider()
@@ -1066,6 +1067,7 @@ def get_response(
                 tool_result = execute_tool_fn(function_name, arguments)
                 logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
                 tool_results.append(tool_result)  # Store the result
+                accumulated_tool_results.append(tool_result)  # Accumulate across iterations

                 if verbose:
                     display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1111,7 +1113,7 @@ def get_response(
             # Special handling for Ollama to prevent infinite loops
             # Only generate summary after multiple iterations to allow sequential execution
             if iteration_count >= 3:
-                tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
                 if tool_summary:
                     final_response_text = tool_summary
                     break
@@ -1545,6 +1547,7 @@ async def get_response_async(
         iteration_count = 0
         final_response_text = ""
         stored_reasoning_content = None  # Store reasoning content from tool execution
+        accumulated_tool_results = []  # Store all tool results across iterations

         while iteration_count < max_iterations:
             response_text = ""
@@ -1715,7 +1718,7 @@ async def get_response_async(
17151718 "tool_calls" : serializable_tool_calls
17161719 })
17171720
1718- tool_results = [] # Store all tool results
1721+ tool_results = [] # Store current iteration tool results
17191722 for tool_call in tool_calls :
17201723 # Handle both object and dict access patterns
17211724 is_ollama = self ._is_ollama_provider ()
@@ -1727,6 +1730,7 @@ async def get_response_async(

                 tool_result = await execute_tool_fn(function_name, arguments)
                 tool_results.append(tool_result)  # Store the result
+                accumulated_tool_results.append(tool_result)  # Accumulate across iterations

                 if verbose:
                     display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1862,7 +1866,7 @@ async def get_response_async(
             # Special handling for Ollama to prevent infinite loops
             # Only generate summary after multiple iterations to allow sequential execution
             if iteration_count >= 3:
-                tool_summary = self._generate_ollama_tool_summary(tool_results, response_text)
+                tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
                 if tool_summary:
                     final_response_text = tool_summary
                     break
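For context, here is a minimal standalone sketch of the pattern this diff introduces. It is an illustration, not the project's code: run_with_tools, plan_iteration, and summarize are hypothetical stand-ins (summarize plays the role of _generate_ollama_tool_summary, whose signature is only known from the call sites above). The point is that tool_results is reset on every pass through the while loop, so only accumulated_tool_results can carry results from earlier iterations into the summary call.

def run_with_tools(execute_tool_fn, plan_iteration, summarize, max_iterations=10):
    """Sketch: per-iteration results reset, accumulated results persist."""
    iteration_count = 0
    final_response_text = ""
    accumulated_tool_results = []   # survives across iterations (the added list)

    while iteration_count < max_iterations:
        # plan_iteration is a stand-in for one LLM round that yields text and tool calls
        response_text, tool_calls = plan_iteration(iteration_count)

        tool_results = []           # reset every iteration, as in the diff
        for function_name, arguments in tool_calls:
            tool_result = execute_tool_fn(function_name, arguments)
            tool_results.append(tool_result)
            accumulated_tool_results.append(tool_result)

        # After several iterations, summarize all results gathered so far,
        # not just the latest iteration's tool_results, mirroring the fixed call.
        if iteration_count >= 3:
            tool_summary = summarize(accumulated_tool_results, response_text)
            if tool_summary:
                final_response_text = tool_summary
                break

        iteration_count += 1

    return final_response_text

With the previous code, the summary helper only ever saw the results produced in the single iteration that crossed the iteration_count >= 3 threshold; passing accumulated_tool_results gives it every result collected across iterations.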