@@ -19,6 +19,7 @@ import {
1919} from 'ai' ;
2020import { google , GoogleGenerativeAIProviderOptions } from '@ai-sdk/google' ;
2121import { anthropic , AnthropicProviderOptions } from '@ai-sdk/anthropic' ;
22+ import { openai , OpenAIResponsesProviderOptions } from '@ai-sdk/openai' ;
2223import z from 'zod' ;
2324import { callWithTimeout } from '../utils/timeout.js' ;
2425import { combineAbortSignals } from '../utils/abort-signal.js' ;
@@ -32,6 +33,10 @@ const SUPPORTED_MODELS = [
3233 'gemini-2.5-flash' ,
3334 'gemini-2.5-pro' ,
3435 'gemini-3-pro-preview' ,
36+ 'gpt-5.1-no-thinking' ,
37+ 'gpt-5.1-thinking-low' ,
38+ 'gpt-5.1-thinking-high' ,
39+ 'gpt-5.1-thinking-medium' ,
3540] as const ;
3641
3742// Increased to a very high value as we rely on an actual timeout
@@ -133,9 +138,13 @@ export class AiSDKRunner implements LlmRunner {
133138 ) ;
134139 }
135140
136- private async _getAiSdkModelOptions (
137- request : LocalLlmGenerateTextRequestOptions ,
138- ) : Promise < { model : LanguageModel ; providerOptions : { } } > {
141+ private async _getAiSdkModelOptions ( request : LocalLlmGenerateTextRequestOptions ) : Promise < {
142+ model : LanguageModel ;
143+ providerOptions :
144+ | { anthropic : AnthropicProviderOptions }
145+ | { google : GoogleGenerativeAIProviderOptions }
146+ | { openai : OpenAIResponsesProviderOptions } ;
147+ } > {
139148 const modelName = request . model as ( typeof SUPPORTED_MODELS ) [ number ] ;
140149 switch ( modelName ) {
141150 case 'claude-opus-4.1-no-thinking' :
@@ -144,9 +153,11 @@ export class AiSDKRunner implements LlmRunner {
144153 return {
145154 model : anthropic ( 'claude-opus-4-1' ) ,
146155 providerOptions : {
147- sendReasoning : thinkingEnabled ,
148- thinking : { type : thinkingEnabled ? 'enabled' : 'disabled' } ,
149- } satisfies AnthropicProviderOptions ,
156+ anthropic : {
157+ sendReasoning : thinkingEnabled ,
158+ thinking : { type : thinkingEnabled ? 'enabled' : 'disabled' } ,
159+ } satisfies AnthropicProviderOptions ,
160+ } ,
150161 } ;
151162 }
152163 case 'claude-sonnet-4.5-no-thinking' :
@@ -155,9 +166,11 @@ export class AiSDKRunner implements LlmRunner {
155166 return {
156167 model : anthropic ( 'claude-sonnet-4-5' ) ,
157168 providerOptions : {
158- sendReasoning : thinkingEnabled ,
159- thinking : { type : thinkingEnabled ? 'enabled' : 'disabled' } ,
160- } satisfies AnthropicProviderOptions ,
169+ anthropic : {
170+ sendReasoning : thinkingEnabled ,
171+ thinking : { type : thinkingEnabled ? 'enabled' : 'disabled' } ,
172+ } satisfies AnthropicProviderOptions ,
173+ } ,
161174 } ;
162175 }
163176 case 'gemini-2.5-flash-lite' :
@@ -167,10 +180,33 @@ export class AiSDKRunner implements LlmRunner {
167180 return {
168181 model : google ( modelName ) ,
169182 providerOptions : {
170- thinkingConfig : {
171- includeThoughts : request . thinkingConfig ?. includeThoughts ,
172- } ,
173- } satisfies GoogleGenerativeAIProviderOptions ,
183+ google : {
184+ thinkingConfig : {
185+ includeThoughts : request . thinkingConfig ?. includeThoughts ,
186+ } ,
187+ } satisfies GoogleGenerativeAIProviderOptions ,
188+ } ,
189+ } ;
190+ case 'gpt-5.1-no-thinking' :
191+ case 'gpt-5.1-thinking-low' :
192+ case 'gpt-5.1-thinking-medium' :
193+ case 'gpt-5.1-thinking-high' :
194+ let reasoningEffort : string = 'none' ;
195+ if ( modelName === 'gpt-5.1-thinking-high' ) {
196+ reasoningEffort = 'high' ;
197+ } else if ( modelName === 'gpt-5.1-thinking-medium' ) {
198+ reasoningEffort = 'medium' ;
199+ } else if ( modelName === 'gpt-5.1-thinking-low' ) {
200+ reasoningEffort = 'low' ;
201+ }
202+ return {
203+ model : openai ( 'gpt-5.1' ) ,
204+ providerOptions : {
205+ openai : {
206+ reasoningEffort,
207+ reasoningSummary : 'detailed' ,
208+ } satisfies OpenAIResponsesProviderOptions ,
209+ } ,
174210 } ;
175211 default :
176212 throw new Error ( `Unexpected model in AI SDK runner: ${ request . model } .` ) ;
(end of diff — the "0 commit comments" line was GitHub page residue, not part of the patch)