Skip to content
Draft
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 62 additions & 2 deletions packages/agents-extensions/src/aiSdk.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,22 @@ import { encodeUint8ArrayToBase64 } from '@openai/agents/utils';
export function itemsToLanguageV2Messages(
model: LanguageModelV2,
items: protocol.ModelItem[],
modelSettingsProviderData?: Record<string, any>,
): LanguageModelV2Message[] {
const messages: LanguageModelV2Message[] = [];
let currentAssistantMessage: LanguageModelV2Message | undefined;
const isDeepSeekThinkingMode = isDeepSeekModel(
model,
modelSettingsProviderData,
);

for (const item of items) {
if (item.type === 'message' || typeof item.type === 'undefined') {
const { role, content, providerData } = item;
if (currentAssistantMessage && role !== 'assistant') {
messages.push(currentAssistantMessage);
currentAssistantMessage = undefined;
}
if (role === 'system') {
messages.push({
role: 'system',
Expand Down Expand Up @@ -226,6 +235,29 @@ export function itemsToLanguageV2Messages(
item.content.length > 0 &&
typeof item.content[0].text === 'string'
) {
if (isDeepSeekThinkingMode) {
if (!currentAssistantMessage) {
currentAssistantMessage = {
role: 'assistant',
content: [],
providerOptions: {
...(item.providerData ?? {}),
Comment on lines +240 to +244

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Preserve tool providerOptions when merging DeepSeek reasoning

In DeepSeek thinking mode we now create the assistant message in the reasoning branch, setting providerOptions from the reasoning item. When the subsequent function_call arrives, the message already exists so its providerOptions are never updated with the tool call’s providerData. This means any tool-level provider options (e.g., cache control or provider-specific flags) are dropped at the message scope whenever a reasoning item precedes the tool call in thinking mode, diverging from previous behavior where tool-call providerOptions were applied to the message. Consider updating the existing currentAssistantMessage with the tool call’s providerData so DeepSeek thinking-mode tool calls retain their message-level options.

Useful? React with 👍 / 👎.

},
};
}
if (
Array.isArray(currentAssistantMessage.content) &&
currentAssistantMessage.role === 'assistant'
) {
currentAssistantMessage.content.push({
type: 'reasoning',
text: item.content[0].text,
providerOptions: { ...(item.providerData ?? {}) },
} as any);
}
continue;
}

messages.push({
role: 'assistant',
content: [
Expand Down Expand Up @@ -262,6 +294,26 @@ export function itemsToLanguageV2Messages(
return messages;
}

/**
 * Determines whether the given model should be treated as DeepSeek running
 * in "thinking" (reasoning) mode, in which case reasoning items must be
 * merged into the assistant tool-call message.
 *
 * Returns true only when the provider id starts with "deepseek" AND either
 * the model id is the dedicated reasoner model or thinking mode has been
 * explicitly enabled via provider options.
 *
 * @param model - The AI SDK language model under inspection.
 * @param modelSettingsProviderData - Optional provider data from model
 *   settings; checked for `providerOptions.deepseek.thinking.type`.
 */
function isDeepSeekModel(
  model: LanguageModelV2,
  modelSettingsProviderData?: Record<string, any>,
): boolean {
  const providerName =
    typeof model.provider === 'string' ? model.provider.toLowerCase() : '';
  // Anything that is not a DeepSeek provider is out of scope immediately.
  if (!providerName.startsWith('deepseek')) {
    return false;
  }

  // The dedicated reasoner model is always in thinking mode.
  const modelId = typeof model.modelId === 'string' ? model.modelId : '';
  if (modelId === 'deepseek-reasoner') {
    return true;
  }

  // Otherwise thinking mode must be explicitly enabled via provider options.
  const thinking =
    modelSettingsProviderData?.providerOptions?.deepseek?.thinking;
  return thinking?.type === 'enabled';
}

/**
* @internal
* Converts a handoff to a language model V2 tool.
Expand Down Expand Up @@ -612,7 +664,11 @@ export class AiSdkModel implements Model {
content: [{ type: 'text', text: request.input }],
},
]
: itemsToLanguageV2Messages(this.#model, request.input);
: itemsToLanguageV2Messages(
this.#model,
request.input,
request.modelSettings.providerData,
);

if (request.systemInstructions) {
input = [
Expand Down Expand Up @@ -867,7 +923,11 @@ export class AiSdkModel implements Model {
content: [{ type: 'text', text: request.input }],
},
]
: itemsToLanguageV2Messages(this.#model, request.input);
: itemsToLanguageV2Messages(
this.#model,
request.input,
request.modelSettings.providerData,
);

if (request.systemInstructions) {
input = [
Expand Down
170 changes: 170 additions & 0 deletions packages/agents-extensions/test/aiSdk.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,176 @@ describe('itemsToLanguageV2Messages', () => {
]);
});

test('merges reasoning into assistant tool-call message for DeepSeek thinking mode (deepseek-reasoner)', () => {
  // The dedicated reasoner model id implies thinking mode — no provider
  // options are needed for the merge behavior to kick in.
  const model = {
    ...stubModel({}),
    provider: 'deepseek.chat',
    modelId: 'deepseek-reasoner',
  } as any;

  const reasoningItem = {
    type: 'reasoning',
    content: [{ text: 'thinking' }],
  } as any;
  const toolCallItem = {
    type: 'function_call',
    callId: '1',
    name: 'foo',
    arguments: '{}',
  } as any;
  const toolResultItem = {
    type: 'function_call_result',
    callId: '1',
    name: 'foo',
    output: { type: 'text', text: 'out' },
  } as any;
  const items: protocol.ModelItem[] = [
    reasoningItem,
    toolCallItem,
    toolResultItem,
  ];

  const result = itemsToLanguageV2Messages(model, items);

  // Reasoning and tool-call parts should be folded into ONE assistant
  // message, followed by the tool result message.
  expect(result).toEqual([
    {
      role: 'assistant',
      content: [
        { type: 'reasoning', text: 'thinking', providerOptions: {} },
        {
          type: 'tool-call',
          toolCallId: '1',
          toolName: 'foo',
          input: {},
          providerOptions: {},
        },
      ],
      providerOptions: {},
    },
    {
      role: 'tool',
      content: [
        {
          type: 'tool-result',
          toolCallId: '1',
          toolName: 'foo',
          output: { type: 'text', value: 'out' },
          providerOptions: {},
        },
      ],
      providerOptions: {},
    },
  ]);
});

test('does not merge reasoning into tool-call message for DeepSeek without thinking mode', () => {
  // Regular DeepSeek chat model, no thinking flag: reasoning stays in its
  // own assistant message, separate from the tool-call message.
  const model = {
    ...stubModel({}),
    provider: 'deepseek.chat',
    modelId: 'deepseek-chat',
  } as any;

  const items: protocol.ModelItem[] = [
    { type: 'reasoning', content: [{ text: 'thinking' }] } as any,
    {
      type: 'function_call',
      callId: '1',
      name: 'foo',
      arguments: '{}',
    } as any,
    {
      type: 'function_call_result',
      callId: '1',
      name: 'foo',
      output: { type: 'text', text: 'out' },
    } as any,
  ];

  const reasoningMessage = {
    role: 'assistant',
    content: [{ type: 'reasoning', text: 'thinking', providerOptions: {} }],
    providerOptions: {},
  };
  const toolCallMessage = {
    role: 'assistant',
    content: [
      {
        type: 'tool-call',
        toolCallId: '1',
        toolName: 'foo',
        input: {},
        providerOptions: {},
      },
    ],
    providerOptions: {},
  };
  const toolResultMessage = {
    role: 'tool',
    content: [
      {
        type: 'tool-result',
        toolCallId: '1',
        toolName: 'foo',
        output: { type: 'text', value: 'out' },
        providerOptions: {},
      },
    ],
    providerOptions: {},
  };

  expect(itemsToLanguageV2Messages(model, items)).toEqual([
    reasoningMessage,
    toolCallMessage,
    toolResultMessage,
  ]);
});

test('merges reasoning into tool-call message for DeepSeek thinking mode (providerOptions)', () => {
  // Non-reasoner model id, but thinking mode is explicitly enabled through
  // model-settings provider data — the merge behavior must still apply.
  const model = {
    ...stubModel({}),
    provider: 'deepseek.chat',
    modelId: 'deepseek-chat',
  } as any;

  const items: protocol.ModelItem[] = [
    { type: 'reasoning', content: [{ text: 'thinking' }] } as any,
    {
      type: 'function_call',
      callId: '1',
      name: 'foo',
      arguments: '{}',
    } as any,
    {
      type: 'function_call_result',
      callId: '1',
      name: 'foo',
      output: { type: 'text', text: 'out' },
    } as any,
  ];

  const settingsProviderData = {
    providerOptions: { deepseek: { thinking: { type: 'enabled' } } },
  };

  const result = itemsToLanguageV2Messages(model, items, settingsProviderData);

  // Same single-assistant-message shape as the deepseek-reasoner case.
  expect(result).toEqual([
    {
      role: 'assistant',
      content: [
        { type: 'reasoning', text: 'thinking', providerOptions: {} },
        {
          type: 'tool-call',
          toolCallId: '1',
          toolName: 'foo',
          input: {},
          providerOptions: {},
        },
      ],
      providerOptions: {},
    },
    {
      role: 'tool',
      content: [
        {
          type: 'tool-result',
          toolCallId: '1',
          toolName: 'foo',
          output: { type: 'text', value: 'out' },
          providerOptions: {},
        },
      ],
      providerOptions: {},
    },
  ]);
});

test('throws on built-in tool calls', () => {
const items: protocol.ModelItem[] = [
{ type: 'hosted_tool_call', name: 'search' } as any,
Expand Down