
Commit c36a1ad

Backport: feat(provider/bedrock): Support Nova 2 extended reasoning maxReasoningEffort field (#10846)
This is an automated backport of #10839 to the release-v5.0 branch. FYI @R-Taneja

Co-authored-by: Rohan Taneja <47066511+R-Taneja@users.noreply.github.com>
1 parent cae5d8f commit c36a1ad


5 files changed (+176, −15 lines)

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+'@ai-sdk/amazon-bedrock': patch
+---
+
+feat(provider/bedrock): Support Nova 2 extended reasoning `maxReasoningEffort` field

content/providers/01-ai-sdk-providers/08-amazon-bedrock.mdx

Lines changed: 22 additions & 7 deletions
@@ -416,16 +416,18 @@ console.log(
 
 ## Reasoning
 
-Amazon Bedrock has reasoning support for the `claude-3-7-sonnet-20250219` model.
+Amazon Bedrock supports model creator-specific reasoning features:
 
-You can enable it using the `reasoningConfig` provider option and specifying a thinking budget in tokens (minimum: `1024`, maximum: `64000`).
+- Anthropic (e.g. `claude-3-7-sonnet-20250219`): enable via the `reasoningConfig` provider option and specifying a thinking budget in tokens (minimum: `1024`, maximum: `64000`).
+- Amazon (e.g. `us.amazon.nova-2-lite-v1:0`): enable via the `reasoningConfig` provider option and specifying a maximum reasoning effort level (`'low' | 'medium' | 'high'`).
 
 ```ts
 import { bedrock } from '@ai-sdk/amazon-bedrock';
 import { generateText } from 'ai';
 
-const { text, reasoning, reasoningDetails } = await generateText({
-  model: bedrock('us.anthropic.claude-3-7-sonnet-20250219-v1:0'),
+// Anthropic example
+const anthropicResult = await generateText({
+  model: bedrock('us.anthropic.claude-3-7-sonnet-20250219-v1:0'),
   prompt: 'How many people will live in the world in 2040?',
   providerOptions: {
     bedrock: {
@@ -434,9 +436,22 @@ const { text, reasoning, reasoningDetails } = await generateText({
     },
   });
 
-console.log(reasoning); // reasoning text
-console.log(reasoningDetails); // reasoning details including redacted reasoning
-console.log(text); // text response
+console.log(anthropicResult.reasoning); // reasoning text
+console.log(anthropicResult.text); // text response
+
+// Nova 2 example
+const amazonResult = await generateText({
+  model: bedrock('us.amazon.nova-2-lite-v1:0'),
+  prompt: 'How many people will live in the world in 2040?',
+  providerOptions: {
+    bedrock: {
+      reasoningConfig: { type: 'enabled', maxReasoningEffort: 'medium' },
+    },
+  },
+});
+
+console.log(amazonResult.reasoning); // reasoning text
+console.log(amazonResult.text); // text response
 ```
 
 See [AI SDK UI: Chatbot](/docs/ai-sdk-ui/chatbot#reasoning) for more details

packages/amazon-bedrock/src/bedrock-chat-language-model.test.ts

Lines changed: 112 additions & 0 deletions
@@ -82,6 +82,11 @@ const anthropicGenerateUrl = `${baseUrl}/model/${encodeURIComponent(
   anthropicModelId,
 )}/converse`;
 
+const novaModelId = 'us.amazon.nova-2-lite-v1:0';
+const novaGenerateUrl = `${baseUrl}/model/${encodeURIComponent(
+  novaModelId,
+)}/converse`;
+
 const server = createTestServer({
   [generateUrl]: {},
   [streamUrl]: {
@@ -92,6 +97,7 @@ const server = createTestServer({
   },
   // Configure the server for the Anthropic model from the start
   [anthropicGenerateUrl]: {},
+  [novaGenerateUrl]: {},
 });
 
 function prepareJsonFixtureResponse(filename: string) {
@@ -138,6 +144,13 @@ const model = new BedrockChatLanguageModel(modelId, {
   generateId: () => 'test-id',
 });
 
+const novaModel = new BedrockChatLanguageModel(novaModelId, {
+  baseUrl: () => baseUrl,
+  headers: {},
+  fetch: fakeFetchWithAuth,
+  generateId: () => 'test-id',
+});
+
 let mockOptions: { success: boolean; errorValue?: any } = { success: true };
 
 describe('doStream', () => {
@@ -2162,6 +2175,40 @@ describe('doStream', () => {
       ]
     `);
   });
+
+  it('should warn when Anthropic model receives maxReasoningEffort in stream', async () => {
+    setupMockEventStreamHandler();
+    server.urls[streamUrl].response = {
+      type: 'stream-chunks',
+      chunks: [
+        JSON.stringify({
+          messageStop: {
+            stopReason: 'stop_sequence',
+          },
+        }) + '\n',
+      ],
+    };
+
+    const result = await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+      providerOptions: {
+        bedrock: {
+          reasoningConfig: {
+            type: 'enabled',
+            maxReasoningEffort: 'medium',
+          },
+        },
+      },
+    });
+
+    await convertReadableStreamToArray(result.stream);
+
+    const requestBody = await server.calls[0].requestBodyJson;
+    expect(
+      requestBody.additionalModelRequestFields?.reasoningConfig,
+    ).toBeUndefined();
+  });
 });
 
 describe('doGenerate', () => {
@@ -2917,6 +2964,71 @@ describe('doGenerate', () => {
     });
   });
 
+  it('maps maxReasoningEffort for Nova without thinking (generate)', async () => {
+    server.urls[novaGenerateUrl].response = {
+      type: 'json-value',
+      body: {
+        output: {
+          message: { content: [{ text: 'Hello' }], role: 'assistant' },
+        },
+        stopReason: 'stop_sequence',
+        usage: { inputTokens: 1, outputTokens: 1, totalTokens: 2 },
+      },
+    };
+
+    await novaModel.doGenerate({
+      prompt: TEST_PROMPT,
+      providerOptions: {
+        bedrock: {
+          reasoningConfig: {
+            type: 'enabled',
+            maxReasoningEffort: 'medium',
+            budgetTokens: 2048,
+          },
+        },
+      },
+    });
+
+    const requestBody = await server.calls[0].requestBodyJson;
+    expect(requestBody).toMatchObject({
+      additionalModelRequestFields: {
+        reasoningConfig: {
+          type: 'enabled',
+          maxReasoningEffort: 'medium',
+        },
+      },
+    });
+    expect(requestBody.additionalModelRequestFields?.thinking).toBeUndefined();
+  });
+
+  it('should warn when Anthropic model receives maxReasoningEffort (generate)', async () => {
+    prepareJsonResponse({});
+
+    const result = await model.doGenerate({
+      prompt: TEST_PROMPT,
+      providerOptions: {
+        bedrock: {
+          reasoningConfig: {
+            type: 'enabled',
+            maxReasoningEffort: 'medium',
+          },
+        },
+      },
+    });
+
+    const requestBody = await server.calls[0].requestBodyJson;
+    expect(
+      requestBody.additionalModelRequestFields?.reasoningConfig,
+    ).toBeUndefined();
+
+    expect(result.warnings).toContainEqual({
+      type: 'unsupported-setting',
+      setting: 'providerOptions',
+      details:
+        'maxReasoningEffort applies only to Amazon Nova models on Bedrock and will be ignored for this model.',
+    });
+  });
+
   it('should extract reasoning text with signature', async () => {
     const reasoningText = 'I need to think about this problem carefully...';
     const signature = 'abc123signature';

packages/amazon-bedrock/src/bedrock-chat-language-model.ts

Lines changed: 36 additions & 8 deletions
@@ -172,8 +172,11 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
       };
     }
 
-    const isThinking = bedrockOptions.reasoningConfig?.type === 'enabled';
+    const isAnthropicModel = this.modelId.includes('anthropic');
+    const isThinkingRequested =
+      bedrockOptions.reasoningConfig?.type === 'enabled';
     const thinkingBudget = bedrockOptions.reasoningConfig?.budgetTokens;
+    const isAnthropicThinkingEnabled = isAnthropicModel && isThinkingRequested;
 
     const inferenceConfig = {
       ...(maxOutputTokens != null && { maxTokens: maxOutputTokens }),
@@ -183,8 +186,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
       ...(stopSequences != null && { stopSequences }),
     };
 
-    // Adjust maxTokens if thinking is enabled
-    if (isThinking && thinkingBudget != null) {
+    if (isAnthropicThinkingEnabled && thinkingBudget != null) {
       if (inferenceConfig.maxTokens != null) {
         inferenceConfig.maxTokens += thinkingBudget;
       } else {
@@ -199,10 +201,37 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
           budget_tokens: thinkingBudget,
         },
       };
+    } else if (!isAnthropicModel && thinkingBudget != null) {
+      warnings.push({
+        type: 'unsupported-setting',
+        setting: 'providerOptions',
+        details:
+          'budgetTokens applies only to Anthropic models on Bedrock and will be ignored for this model.',
+      });
+    }
+
+    const maxReasoningEffort =
+      bedrockOptions.reasoningConfig?.maxReasoningEffort;
+    if (maxReasoningEffort != null && !isAnthropicModel) {
+      bedrockOptions.additionalModelRequestFields = {
+        ...bedrockOptions.additionalModelRequestFields,
+        reasoningConfig: {
+          ...(bedrockOptions.reasoningConfig?.type != null && {
+            type: bedrockOptions.reasoningConfig.type,
+          }),
+          maxReasoningEffort,
+        },
+      };
+    } else if (maxReasoningEffort != null && isAnthropicModel) {
+      warnings.push({
+        type: 'unsupported-setting',
+        setting: 'providerOptions',
+        details:
+          'maxReasoningEffort applies only to Amazon Nova models on Bedrock and will be ignored for this model.',
+      });
     }
 
-    // Remove temperature if thinking is enabled
-    if (isThinking && inferenceConfig.temperature != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.temperature != null) {
       delete inferenceConfig.temperature;
       warnings.push({
         type: 'unsupported-setting',
@@ -211,8 +240,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
       });
     }
 
-    // Remove topP if thinking is enabled
-    if (isThinking && inferenceConfig.topP != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.topP != null) {
       delete inferenceConfig.topP;
       warnings.push({
         type: 'unsupported-setting',
@@ -221,7 +249,7 @@ export class BedrockChatLanguageModel implements LanguageModelV2 {
       });
     }
 
-    if (isThinking && inferenceConfig.topK != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.topK != null) {
       delete inferenceConfig.topK;
       warnings.push({
         type: 'unsupported-setting',
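
For orientation (this commentary is not part of the commit), here is a minimal TypeScript sketch of the mapping the diff above implements. The `additionalModelRequestFieldsFor` helper is hypothetical and stands in for the inline branching; the field names and example values come from the diff and the test expectations.

```ts
// Hypothetical helper condensing the branching added above: Anthropic models
// keep the existing `thinking` block, while non-Anthropic (Nova) models get
// the new `reasoningConfig` block with `maxReasoningEffort`.
type ReasoningConfig = {
  type?: 'enabled' | 'disabled';
  budgetTokens?: number;
  maxReasoningEffort?: 'low' | 'medium' | 'high';
};

function additionalModelRequestFieldsFor(
  modelId: string,
  config: ReasoningConfig,
): Record<string, unknown> {
  const isAnthropicModel = modelId.includes('anthropic');

  if (isAnthropicModel && config.type === 'enabled' && config.budgetTokens != null) {
    // Existing Anthropic path: extended thinking with a token budget.
    return { thinking: { type: 'enabled', budget_tokens: config.budgetTokens } };
  }

  if (!isAnthropicModel && config.maxReasoningEffort != null) {
    // New Nova path: forward the effort level (and type, if given).
    return {
      reasoningConfig: {
        ...(config.type != null && { type: config.type }),
        maxReasoningEffort: config.maxReasoningEffort,
      },
    };
  }

  // Mismatched combinations produce warnings in the real provider code.
  return {};
}

// { thinking: { type: 'enabled', budget_tokens: 1024 } }
console.log(
  additionalModelRequestFieldsFor('us.anthropic.claude-3-7-sonnet-20250219-v1:0', {
    type: 'enabled',
    budgetTokens: 1024,
  }),
);

// { reasoningConfig: { type: 'enabled', maxReasoningEffort: 'medium' } }
console.log(
  additionalModelRequestFieldsFor('us.amazon.nova-2-lite-v1:0', {
    type: 'enabled',
    maxReasoningEffort: 'medium',
  }),
);
```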

packages/amazon-bedrock/src/bedrock-chat-options.ts

Lines changed: 1 addition & 0 deletions
@@ -102,6 +102,7 @@ export const bedrockProviderOptions = z.object({
     .object({
       type: z.union([z.literal('enabled'), z.literal('disabled')]).optional(),
       budgetTokens: z.number().optional(),
+      maxReasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
     })
     .optional(),
   /**
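
To see what the one-line schema addition accepts, here is a small standalone sketch (assuming the `zod` package; the `reasoningConfig` constant simply mirrors the object shown in the diff rather than importing the provider's schema):

```ts
import { z } from 'zod';

// Mirrors the reasoningConfig shape from bedrock-chat-options.ts after this change.
const reasoningConfig = z
  .object({
    type: z.union([z.literal('enabled'), z.literal('disabled')]).optional(),
    budgetTokens: z.number().optional(),
    maxReasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
  })
  .optional();

// Anthropic-style config (token budget) still validates:
console.log(
  reasoningConfig.safeParse({ type: 'enabled', budgetTokens: 2048 }).success,
); // true

// Nova 2-style config (effort level) now validates as well:
console.log(
  reasoningConfig.safeParse({ type: 'enabled', maxReasoningEffort: 'medium' })
    .success,
); // true

// Unknown effort levels are rejected:
console.log(
  reasoningConfig.safeParse({ maxReasoningEffort: 'maximum' }).success,
); // false
```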
