
Commit 520c8a9

feat(api): Add responses.input_tokens.count
1 parent: d4aaef9


8 files changed: 239 additions & 4 deletions


.stats.yml

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-configured_endpoints: 134
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b062c33330de7e3bbf992fd4f0799afd868c30a66c39418dd2c62f4add3b45b6.yml
-openapi_spec_hash: fe067f5b1c0e93799b5ea7fde3c4b1b3
-config_hash: 4b6f471b24d659514b86b736c90a0c0a
+configured_endpoints: 135
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-812a10f8fb54c584efc914422b574cb3f43dc238b5733b13f6a0b2308b7d9910.yml
+openapi_spec_hash: 0222041ba12a5ff6b94924a834fa91a2
+config_hash: 50ee3382a63c021a9f821a935950e926

MIGRATION.md

Lines changed: 1 addition & 0 deletions
@@ -135,6 +135,7 @@ client.example.list(undefined, { headers: { ... } });
 - `client.batches.list()`
 - `client.responses.retrieve()`
 - `client.responses.inputItems.list()`
+- `client.responses.inputTokens.count()`
 - `client.realtime.calls.reject()`
 - `client.conversations.create()`
 - `client.conversations.items.list()`

api.md

Lines changed: 10 additions & 0 deletions
@@ -824,6 +824,16 @@ Methods:
 
 - <code title="get /responses/{response_id}/input_items">client.responses.inputItems.<a href="./src/resources/responses/input-items.ts">list</a>(responseID, { ...params }) -> ResponseItemsPage</code>
 
+## InputTokens
+
+Types:
+
+- <code><a href="./src/resources/responses/input-tokens.ts">InputTokenCountResponse</a></code>
+
+Methods:
+
+- <code title="post /responses/input_tokens">client.responses.inputTokens.<a href="./src/resources/responses/input-tokens.ts">count</a>({ ...params }) -> InputTokenCountResponse</code>
+
 # Realtime
 
 Types:
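
For orientation, a minimal usage sketch of the endpoint this commit adds, following the `@example` in the generated source below. Every field of `InputTokenCountParams` is optional, and the `model` and `input` values here are illustrative:

```ts
import OpenAI from 'openai';

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

// Count the tokens a request would consume, without creating a response.
const result = await client.responses.inputTokens.count({
  model: 'gpt-4o', // illustrative model name
  input: 'How many tokens is this?',
});

console.log(result.object); // 'response.input_tokens'
console.log(result.input_tokens); // number of input tokens
```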

scripts/detect-breaking-changes

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ TEST_PATHS=(
   tests/api-resources/uploads/parts.test.ts
   tests/api-resources/responses/responses.test.ts
   tests/api-resources/responses/input-items.test.ts
+  tests/api-resources/responses/input-tokens.test.ts
   tests/api-resources/realtime/realtime.test.ts
   tests/api-resources/realtime/client-secrets.test.ts
   tests/api-resources/realtime/calls.test.ts

src/resources/responses/index.ts

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items';
+export { InputTokens, type InputTokenCountResponse, type InputTokenCountParams } from './input-tokens';
 export { Responses } from './responses';
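
The new module's types can also be imported directly in application code. A small sketch, assuming the package's usual `openai/resources/...` subpath exports; the `countInputTokens` helper is hypothetical:

```ts
import OpenAI from 'openai';
import type {
  InputTokenCountParams,
  InputTokenCountResponse,
} from 'openai/resources/responses/input-tokens';

const client = new OpenAI();

// Hypothetical helper typed against the new surface.
async function countInputTokens(params: InputTokenCountParams): Promise<number> {
  const res: InputTokenCountResponse = await client.responses.inputTokens.count(params);
  return res.input_tokens;
}
```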
src/resources/responses/input-tokens.ts

Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../core/resource';
+import * as Shared from '../shared';
+import * as ResponsesAPI from './responses';
+import { APIPromise } from '../../core/api-promise';
+import { RequestOptions } from '../../internal/request-options';
+
+export class InputTokens extends APIResource {
+  /**
+   * Get input token counts
+   *
+   * @example
+   * ```ts
+   * const response = await client.responses.inputTokens.count();
+   * ```
+   */
+  count(
+    body: InputTokenCountParams | null | undefined = {},
+    options?: RequestOptions,
+  ): APIPromise<InputTokenCountResponse> {
+    return this._client.post('/responses/input_tokens', { body, ...options });
+  }
+}
+
+export interface InputTokenCountResponse {
+  input_tokens: number;
+
+  object: 'response.input_tokens';
+}
+
+export interface InputTokenCountParams {
+  /**
+   * The conversation that this response belongs to. Items from this conversation are
+   * prepended to `input_items` for this response request. Input items and output
+   * items from this response are automatically added to this conversation after this
+   * response completes.
+   */
+  conversation?: string | ResponsesAPI.ResponseConversationParam | null;
+
+  /**
+   * Text, image, or file inputs to the model, used to generate a response
+   */
+  input?: string | Array<ResponsesAPI.ResponseInputItem> | null;
+
+  /**
+   * A system (or developer) message inserted into the model's context. When used
+   * along with `previous_response_id`, the instructions from a previous response
+   * will not be carried over to the next response. This makes it simple to swap out
+   * system (or developer) messages in new responses.
+   */
+  instructions?: string | null;
+
+  /**
+   * Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+   * wide range of models with different capabilities, performance characteristics,
+   * and price points. Refer to the
+   * [model guide](https://platform.openai.com/docs/models) to browse and compare
+   * available models.
+   */
+  model?: string | null;
+
+  /**
+   * Whether to allow the model to run tool calls in parallel.
+   */
+  parallel_tool_calls?: boolean | null;
+
+  /**
+   * The unique ID of the previous response to the model. Use this to create
+   * multi-turn conversations. Learn more about
+   * [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+   * Cannot be used in conjunction with `conversation`.
+   */
+  previous_response_id?: string | null;
+
+  /**
+   * **gpt-5 and o-series models only** Configuration options for
+   * [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+   */
+  reasoning?: Shared.Reasoning | null;
+
+  /**
+   * Configuration options for a text response from the model. Can be plain text or
+   * structured JSON data. Learn more:
+   *
+   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+   */
+  text?: InputTokenCountParams.Text | null;
+
+  /**
+   * How the model should select which tool (or tools) to use when generating a
+   * response. See the `tools` parameter to see how to specify which tools the model
+   * can call.
+   */
+  tool_choice?:
+    | ResponsesAPI.ToolChoiceOptions
+    | ResponsesAPI.ToolChoiceAllowed
+    | ResponsesAPI.ToolChoiceTypes
+    | ResponsesAPI.ToolChoiceFunction
+    | ResponsesAPI.ToolChoiceMcp
+    | ResponsesAPI.ToolChoiceCustom
+    | null;
+
+  /**
+   * An array of tools the model may call while generating a response. You can
+   * specify which tool to use by setting the `tool_choice` parameter.
+   */
+  tools?: Array<ResponsesAPI.Tool> | null;
+
+  /**
+   * The truncation strategy to use for the model response. - `auto`: If the input to
+   * this Response exceeds the model's context window size, the model will truncate
+   * the response to fit the context window by dropping items from the beginning of
+   * the conversation. - `disabled` (default): If the input size will exceed the
+   * context window size for a model, the request will fail with a 400 error.
+   */
+  truncation?: 'auto' | 'disabled';
+}
+
+export namespace InputTokenCountParams {
+  /**
+   * Configuration options for a text response from the model. Can be plain text or
+   * structured JSON data. Learn more:
+   *
+   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+   */
+  export interface Text {
+    /**
+     * An object specifying the format that the model must output.
+     *
+     * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+     * ensures the model will match your supplied JSON schema. Learn more in the
+     * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+     *
+     * The default format is `{ "type": "text" }` with no additional options.
+     *
+     * **Not recommended for gpt-4o and newer models:**
+     *
+     * Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+     * ensures the message the model generates is valid JSON. Using `json_schema` is
+     * preferred for models that support it.
+     */
+    format?: ResponsesAPI.ResponseFormatTextConfig;
+
+    /**
+     * Constrains the verbosity of the model's response. Lower values will result in
+     * more concise responses, while higher values will result in more verbose
+     * responses. Currently supported values are `low`, `medium`, and `high`.
+     */
+    verbosity?: 'low' | 'medium' | 'high' | null;
+  }
+}
+
+export declare namespace InputTokens {
+  export {
+    type InputTokenCountResponse as InputTokenCountResponse,
+    type InputTokenCountParams as InputTokenCountParams,
+  };
+}
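
The doc comments above note that `conversation` and `previous_response_id` are mutually exclusive. A hedged sketch of counting tokens for a follow-up turn, reusing the `client` from the earlier example; the response ID is illustrative:

```ts
// Count what a follow-up turn would cost given prior conversation state.
// Per the param docs, `previous_response_id` cannot be combined with `conversation`.
const followUp = await client.responses.inputTokens.count({
  model: 'gpt-4o', // illustrative
  previous_response_id: 'resp_123', // illustrative ID of a prior response
  input: 'Now answer in French.',
  truncation: 'auto', // drop oldest conversation items instead of failing with a 400
});

console.log(followUp.input_tokens);
```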

src/resources/responses/responses.ts

Lines changed: 10 additions & 0 deletions
@@ -12,6 +12,8 @@ import * as ResponsesAPI from './responses';
 import * as Shared from '../shared';
 import * as InputItemsAPI from './input-items';
 import { InputItemListParams, InputItems, ResponseItemList } from './input-items';
+import * as InputTokensAPI from './input-tokens';
+import { InputTokenCountParams, InputTokenCountResponse, InputTokens } from './input-tokens';
 import { APIPromise } from '../../core/api-promise';
 import { CursorPage } from '../../core/pagination';
 import { Stream } from '../../core/streaming';
@@ -58,6 +60,7 @@ export type ResponseParseParams = ResponseCreateParamsNonStreaming;
 
 export class Responses extends APIResource {
   inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client);
+  inputTokens: InputTokensAPI.InputTokens = new InputTokensAPI.InputTokens(this._client);
 
   /**
    * Creates a model response. Provide
@@ -5763,6 +5766,7 @@ export interface ResponseRetrieveParamsStreaming extends ResponseRetrieveParamsB
 }
 
 Responses.InputItems = InputItems;
+Responses.InputTokens = InputTokens;
 
 export declare namespace Responses {
   export {
@@ -5890,4 +5894,10 @@ export declare namespace Responses {
     type ResponseItemList as ResponseItemList,
     type InputItemListParams as InputItemListParams,
   };
+
+  export {
+    InputTokens as InputTokens,
+    type InputTokenCountResponse as InputTokenCountResponse,
+    type InputTokenCountParams as InputTokenCountParams,
+  };
 }
tests/api-resources/responses/input-tokens.test.ts

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import OpenAI from 'openai';
+
+const client = new OpenAI({
+  apiKey: 'My API Key',
+  baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource inputTokens', () => {
+  test('count', async () => {
+    const responsePromise = client.responses.inputTokens.count();
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('count: request options and params are passed correctly', async () => {
+    // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
+    await expect(
+      client.responses.inputTokens.count(
+        {
+          conversation: 'string',
+          input: 'string',
+          instructions: 'instructions',
+          model: 'model',
+          parallel_tool_calls: true,
+          previous_response_id: 'resp_123',
+          reasoning: { effort: 'minimal', generate_summary: 'auto', summary: 'auto' },
+          text: { format: { type: 'text' }, verbosity: 'low' },
+          tool_choice: 'none',
+          tools: [
+            {
+              name: 'name',
+              parameters: { foo: 'bar' },
+              strict: true,
+              type: 'function',
+              description: 'description',
+            },
+          ],
+          truncation: 'auto',
+        },
+        { path: '/_stainless_unknown_path' },
+      ),
+    ).rejects.toThrow(OpenAI.NotFoundError);
+  });
+});
