Commit df258a5

Merge pull request #6 from yusuf-eren/feature/issue/5
Feature/issue/5
2 parents a163af4 + ec16ef0 commit df258a5

File tree

9 files changed: +157 -35 lines changed
+95
@@ -0,0 +1,95 @@
+import { Agent, Runner } from '../../src/agents';
+
+/**
+ * This demonstrates usage of the `previous_response_id` parameter to continue a conversation.
+ * The second run passes the previous response ID to the model, which allows it to continue the
+ * conversation without re-sending the previous messages.
+ *
+ * Notes:
+ * 1. This only applies to the OpenAI Responses API. Other models will ignore this parameter.
+ * 2. Responses are only stored for 30 days as of this writing, so in production you should
+ *    store the response ID along with an expiration date; if the response is no longer valid,
+ *    you'll need to re-send the previous conversation history.
+ */
+
+async function main() {
+  const agent = new Agent({
+    name: 'Assistant',
+    instructions: 'You are a helpful assistant. Be VERY concise.',
+  });
+
+  const result = await Runner.run(
+    agent,
+    'What is the largest country in South America?'
+  );
+  console.log(result.finalOutput);
+  // Brazil
+  console.log('first message response_id:', result.lastResponseId);
+
+  const followupResult = await Runner.run(
+    agent,
+    'What is the capital of that country?',
+    { previousResponseId: result.lastResponseId! }
+  );
+  console.log(followupResult.finalOutput);
+  // Brasilia
+  console.log('second message response_id:', followupResult.lastResponseId);
+}
+
+async function mainStream() {
+  const agent = new Agent({
+    name: 'Assistant',
+    instructions: 'You are a helpful assistant. Be VERY concise.',
+    tools: [],
+  });
+
+  const result = Runner.runStreamed(
+    agent,
+    'What is the largest country in South America?'
+  );
+
+  for await (const event of result.streamEvents()) {
+    if (
+      event.type === 'raw_response_event' &&
+      event.data.type === 'response.output_text.delta'
+    ) {
+      process.stdout.write(event.data.delta);
+    }
+  }
+  console.log('\n---');
+
+  console.log('first message response_id:', result.lastResponseId);
+
+  const followupResult = Runner.runStreamed(
+    agent,
+    'What is the capital of that country?',
+    { previousResponseId: result.lastResponseId! }
+  );
+
+  for await (const event of followupResult.streamEvents()) {
+    if (
+      event.type === 'raw_response_event' &&
+      event.data.type === 'response.output_text.delta'
+    ) {
+      process.stdout.write(event.data.delta);
+    }
+  }
+
+  console.log('\n---');
+  console.log('second message response_id:', followupResult.lastResponseId);
+}
+
+// Get user input for stream mode
+const readline = require('readline').createInterface({
+  input: process.stdin,
+  output: process.stdout,
+});
+
+readline.question('Run in stream mode? (y/n): ', (answer: string) => {
+  readline.close();
+  if (answer.toLowerCase() === 'y') {
+    mainStream();
+  } else {
+    main();
+  }
+});
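
Note: the retention caveat in the header comment implies persisting more than just the ID. A minimal sketch of that pattern, assuming an in-memory store and a fixed 30-day window; the helper names are hypothetical and not part of this commit:

// Sketch only: pair each stored response ID with an expiry so a caller can
// decide between passing previousResponseId and re-sending the history.
const THIRTY_DAYS_MS = 30 * 24 * 60 * 60 * 1000;

interface StoredResponse {
  responseId: string;
  expiresAt: number; // epoch milliseconds
}

const store = new Map<string, StoredResponse>(); // keyed by conversation ID

function saveResponseId(conversationId: string, responseId: string): void {
  store.set(conversationId, {
    responseId,
    expiresAt: Date.now() + THIRTY_DAYS_MS,
  });
}

function getValidResponseId(conversationId: string): string | null {
  const entry = store.get(conversationId);
  if (!entry || Date.now() >= entry.expiresAt) return null; // expired or never stored
  return entry.responseId;
}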

package.json

+1-1
@@ -1,6 +1,6 @@
 {
   "name": "openai-agents-js",
-  "version": "0.1.2",
+  "version": "0.1.4",
   "description": "An unofficial Node.js library for building AI agents and multi-agent workflows with OpenAI",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",

src/agents/agent/index.ts

+4-4
@@ -74,7 +74,7 @@ interface AgentProps<TContext = any> {
   /** Model-specific tuning parameters */
   model_settings?: ModelSettings;
   /** A list of tools that the agent can use */
-  tools: Array<Tool>;
+  tools?: Array<Tool>;
   /** Model Context Protocol servers the agent can use */
   mcp_servers?: any; // TODO: Implement `MCPServer`. Then uncomment: Array<MCPServer>
   /** Checks that run before generating a response */
@@ -126,7 +126,7 @@ export class Agent<TContext> {
   model_settings: ModelSettings = new ModelSettings();
 
   /** A list of tools that the agent can use */
-  tools: Array<Tool> = [];
+  tools?: Array<Tool> = [];
 
   /**
    * A list of Model Context Protocol servers that the agent can use.
@@ -188,7 +188,7 @@ export class Agent<TContext> {
     this.handoffs = handoffs;
     this.model = model ?? DEFAULT_MODEL;
     this.model_settings = model_settings ?? new ModelSettings();
-    this.tools = tools;
+    this.tools = tools || [];
     this.mcp_servers = mcp_servers;
     this.input_guardrails = input_guardrails ?? [];
     this.output_guardrails = output_guardrails ?? [];
@@ -229,7 +229,7 @@ export class Agent<TContext> {
    */
   async getAllTools(): Promise<Tool[]> {
     const mcpTools = await this.getMCPTools();
-    return [...mcpTools, ...this.tools];
+    return [...(mcpTools ?? []), ...(this.tools ?? [])];
   }
 
   /**
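
Note: with `tools` now optional end to end, an `Agent` can be constructed without a tool list and `getAllTools()` still resolves. A short sketch, assuming the import path used by the example file above and that `getMCPTools()` tolerates the default (no MCP servers) configuration:

import { Agent } from '../../src/agents';

// `tools` may be omitted entirely; the constructor falls back to [] and
// getAllTools() guards both sources, so this resolves to an empty array.
const minimal = new Agent({
  name: 'Assistant',
  instructions: 'You are a helpful assistant.',
});

minimal.getAllTools().then((tools) => console.log(tools.length)); // 0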

src/agents/items/index.ts

+3-3
@@ -239,16 +239,16 @@ export class ModelResponse {
    * An ID for the response which can be used to refer to the response in subsequent calls to the
    * model. Not supported by all model providers.
    */
-  referenceable_id: string | null;
+  response_id: string | null;
 
   constructor(
     output: TResponseOutputItem[],
     usage: Usage,
-    referenceable_id: string | null = null
+    response_id: string | null = null
   ) {
     this.output = output;
     this.usage = usage;
-    this.referenceable_id = referenceable_id;
+    this.response_id = response_id;
   }
 
   /**

src/agents/models/interface.ts

+4-2
@@ -71,7 +71,8 @@ export abstract class Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse>;
 
   /**
@@ -93,7 +94,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncIterableIterator<TResponseStreamEvent>;
 }
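
Note: because `previousResponseId` is optional, existing `Model` subclasses keep compiling unchanged. A rough stub of the new shape; the import path, the leading parameter names (loosened to `any`, since the hunks above elide them), and the assumption that these are the only abstract members are all assumptions, not part of this commit:

import { Model, ModelTracing, ModelResponse } from './interface'; // path assumed

class StubModel extends Model {
  async getResponse(
    systemInstructions: any,
    input: any,
    modelSettings: any,
    tools: any[],
    outputSchema: any,
    handoffs: any[],
    tracing: ModelTracing,
    previousResponseId?: string // new, safely ignorable by non-Responses backends
  ): Promise<ModelResponse> {
    throw new Error('sketch only');
  }

  async *streamResponse(
    systemInstructions: any,
    input: any,
    modelSettings: any,
    tools: any[],
    outputSchema: any,
    handoffs: any[],
    tracing: ModelTracing,
    previousResponseId?: string
  ): AsyncIterableIterator<any> {
    throw new Error('sketch only');
  }
}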

src/agents/models/openai-chatcompletions.ts

+4-2
@@ -87,7 +87,8 @@ export class OpenAIChatCompletionsModel implements Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse> {
     const convertedInput =
       typeof input === 'string'
@@ -160,7 +161,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncGenerator<TResponseStreamEvent> {
     const convertedInput =
       typeof input === 'string'

src/agents/models/openai-responses.ts

+11-5
@@ -214,7 +214,8 @@ export class OpenAIResponsesModel implements Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse> {
     try {
       const response = await this.fetchResponse(
@@ -224,7 +225,8 @@
         tools,
         outputSchema,
         handoffs,
-        false
+        false,
+        previousResponseId
       );
 
       if (!('usage' in response) || !response.usage) {
@@ -268,7 +270,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncIterableIterator<TResponseStreamEvent> {
     const stream = await this.fetchResponse(
       systemInstructions,
@@ -277,7 +280,8 @@
       tools,
       outputSchema,
       handoffs,
-      true
+      true,
+      previousResponseId
     );
 
     if (!(stream instanceof Stream)) {
@@ -296,7 +300,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff<any>[],
-    stream: boolean
+    stream: boolean,
+    previousResponseId?: string
   ): Promise<Response | Stream<ResponseStreamEvent>> {
     const listInput = Array.isArray(input)
       ? input
@@ -337,6 +342,7 @@
       text: responseFormat,
       store: modelSettings.store ?? undefined,
       reasoning: modelSettings.reasoning ?? null,
+      previous_response_id: previousResponseId ?? undefined,
     };
 
     // console.log('----PARAMA SOKARIm\n\n', params, '\n\n----');
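
Note: the new `previous_response_id` key maps directly onto the OpenAI Responses API parameter of the same name. For reference, a minimal sketch of the equivalent raw SDK call (the model name is an arbitrary choice, and top-level await assumes an ESM module):

import OpenAI from 'openai';

const client = new OpenAI();

// First turn: the API stores the response (subject to the retention window)
// and returns an ID on the response object.
const first = await client.responses.create({
  model: 'gpt-4o-mini',
  input: 'What is the largest country in South America?',
});

// Second turn: previous_response_id stands in for re-sending the history.
const second = await client.responses.create({
  model: 'gpt-4o-mini',
  input: 'What is the capital of that country?',
  previous_response_id: first.id,
});
console.log(second.output_text);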

src/agents/result.ts

+4
@@ -112,6 +112,10 @@ export abstract class RunResultBase {
 
     return [...originalItems, ...newItems];
   }
+
+  get lastResponseId(): string | null {
+    return this.rawResponses[this.rawResponses.length - 1]?.response_id ?? null;
+  }
 }
 
 /**
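
Note: `lastResponseId` is null until at least one raw response carries an ID, and the example file papers over that with a non-null assertion. A guarded follow-up turn, reusing the `Agent`/`Runner` API from the example above (run as an ESM module for top-level await):

import { Agent, Runner } from '../../src/agents';

const agent = new Agent({
  name: 'Assistant',
  instructions: 'You are a helpful assistant. Be VERY concise.',
});

const result = await Runner.run(agent, 'What is the largest country in South America?');

// Guard instead of asserting with `!`: the getter returns null when the
// last raw response has no response_id (or there were no responses at all).
if (result.lastResponseId) {
  const followup = await Runner.run(agent, 'What is the capital of that country?', {
    previousResponseId: result.lastResponseId,
  });
  console.log(followup.finalOutput);
} else {
  // Fall back to re-sending the conversation history here.
}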
