Commit 81ffa82
feat: support gpt-5.1 with various thinking modes in AI SDK runner

* Adds support for the various GPT 5.1 thinking mode variations in the AI SDK runner!
* Fixes an issue where the old provider options might not be picked up. The AI SDK API is unfortunately not very type safe here, but I tried to make it a bit safer on our side.
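
For context: the AI SDK expects provider-specific options to be nested under the provider's key inside `providerOptions`, which appears to be why the old flat options might not have been picked up. A minimal sketch of a call site (not part of this commit; the prompt and option values are purely illustrative):

```ts
import {generateText} from 'ai';
import {openai, OpenAIResponsesProviderOptions} from '@ai-sdk/openai';

// Provider options are keyed by provider name ('openai' here). The
// `providerOptions` field itself is loosely typed, so `satisfies` adds
// some compile-time checking on our side.
const {text} = await generateText({
  model: openai('gpt-5.1'),
  prompt: 'Say hello.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      reasoningSummary: 'detailed',
    } satisfies OpenAIResponsesProviderOptions,
  },
});
```
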
1 parent dedae61 commit 81ffa82

File tree

3 files changed: +65 -13 lines changed

package.json

Lines changed: 1 addition & 0 deletions

@@ -53,6 +53,7 @@
   "dependencies": {
     "@ai-sdk/anthropic": "^2.0.45",
     "@ai-sdk/google": "^2.0.39",
+    "@ai-sdk/openai": "^2.0.71",
     "@anthropic-ai/sdk": "^0.68.0",
     "@axe-core/puppeteer": "^4.10.2",
     "@genkit-ai/compat-oai": "1.23.0",

pnpm-lock.yaml

Lines changed: 15 additions & 0 deletions
Some generated files are not rendered by default.

runner/codegen/ai-sdk-runner.ts

Lines changed: 49 additions & 13 deletions
@@ -19,6 +19,7 @@ import {
 } from 'ai';
 import {google, GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
 import {anthropic, AnthropicProviderOptions} from '@ai-sdk/anthropic';
+import {openai, OpenAIResponsesProviderOptions} from '@ai-sdk/openai';
 import z from 'zod';
 import {callWithTimeout} from '../utils/timeout.js';
 import {combineAbortSignals} from '../utils/abort-signal.js';
@@ -32,6 +33,10 @@ const SUPPORTED_MODELS = [
   'gemini-2.5-flash',
   'gemini-2.5-pro',
   'gemini-3-pro-preview',
+  'gpt-5.1-no-thinking',
+  'gpt-5.1-thinking-low',
+  'gpt-5.1-thinking-high',
+  'gpt-5.1-thinking-medium',
 ] as const;

 // Increased to a very high value as we rely on an actual timeout
@@ -133,9 +138,13 @@ export class AiSDKRunner implements LlmRunner {
     );
   }

-  private async _getAiSdkModelOptions(
-    request: LocalLlmGenerateTextRequestOptions,
-  ): Promise<{model: LanguageModel; providerOptions: {}}> {
+  private async _getAiSdkModelOptions(request: LocalLlmGenerateTextRequestOptions): Promise<{
+    model: LanguageModel;
+    providerOptions:
+      | {anthropic: AnthropicProviderOptions}
+      | {google: GoogleGenerativeAIProviderOptions}
+      | {openai: OpenAIResponsesProviderOptions};
+  }> {
     const modelName = request.model as (typeof SUPPORTED_MODELS)[number];
     switch (modelName) {
       case 'claude-opus-4.1-no-thinking':
@@ -144,9 +153,11 @@ export class AiSDKRunner implements LlmRunner {
         return {
           model: anthropic('claude-opus-4-1'),
           providerOptions: {
-            sendReasoning: thinkingEnabled,
-            thinking: {type: thinkingEnabled ? 'enabled' : 'disabled'},
-          } satisfies AnthropicProviderOptions,
+            anthropic: {
+              sendReasoning: thinkingEnabled,
+              thinking: {type: thinkingEnabled ? 'enabled' : 'disabled'},
+            } satisfies AnthropicProviderOptions,
+          },
         };
       }
       case 'claude-sonnet-4.5-no-thinking':
@@ -155,9 +166,11 @@ export class AiSDKRunner implements LlmRunner {
         return {
           model: anthropic('claude-sonnet-4-5'),
           providerOptions: {
-            sendReasoning: thinkingEnabled,
-            thinking: {type: thinkingEnabled ? 'enabled' : 'disabled'},
-          } satisfies AnthropicProviderOptions,
+            anthropic: {
+              sendReasoning: thinkingEnabled,
+              thinking: {type: thinkingEnabled ? 'enabled' : 'disabled'},
+            } satisfies AnthropicProviderOptions,
+          },
         };
       }
       case 'gemini-2.5-flash-lite':
@@ -167,10 +180,33 @@ export class AiSDKRunner implements LlmRunner {
         return {
           model: google(modelName),
           providerOptions: {
-            thinkingConfig: {
-              includeThoughts: request.thinkingConfig?.includeThoughts,
-            },
-          } satisfies GoogleGenerativeAIProviderOptions,
+            google: {
+              thinkingConfig: {
+                includeThoughts: request.thinkingConfig?.includeThoughts,
+              },
+            } satisfies GoogleGenerativeAIProviderOptions,
+          },
+        };
+      case 'gpt-5.1-no-thinking':
+      case 'gpt-5.1-thinking-low':
+      case 'gpt-5.1-thinking-medium':
+      case 'gpt-5.1-thinking-high':
+        let reasoningEffort: string = 'none';
+        if (modelName === 'gpt-5.1-thinking-high') {
+          reasoningEffort = 'high';
+        } else if (modelName === 'gpt-5.1-thinking-medium') {
+          reasoningEffort = 'medium';
+        } else if (modelName === 'gpt-5.1-thinking-low') {
+          reasoningEffort = 'low';
+        }
+        return {
+          model: openai('gpt-5.1'),
+          providerOptions: {
+            openai: {
+              reasoningEffort,
+              reasoningSummary: 'detailed',
+            } satisfies OpenAIResponsesProviderOptions,
+          },
         };
       default:
         throw new Error(`Unexpected model in AI SDK runner: ${request.model}.`);
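
The gpt-5.1 cases boil down to a model-name to reasoning-effort mapping, with 'none' covering the no-thinking variant. A sketch of an equivalent table-based lookup (not what the commit ships; it uses the if/else chain above):

```ts
// Sketch only: equivalent lookup for the if/else chain in the diff above.
// 'none' is the default and corresponds to 'gpt-5.1-no-thinking'.
const REASONING_EFFORT_BY_MODEL = {
  'gpt-5.1-no-thinking': 'none',
  'gpt-5.1-thinking-low': 'low',
  'gpt-5.1-thinking-medium': 'medium',
  'gpt-5.1-thinking-high': 'high',
} as const;

type Gpt51ModelName = keyof typeof REASONING_EFFORT_BY_MODEL;

// Usage inside the gpt-5.1 cases:
//   const reasoningEffort: string = REASONING_EFFORT_BY_MODEL[modelName as Gpt51ModelName];
```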
