diff --git a/examples/basic/previous-response-id.ts b/examples/basic/previous-response-id.ts
new file mode 100644
index 0000000..45e233f
--- /dev/null
+++ b/examples/basic/previous-response-id.ts
@@ -0,0 +1,95 @@
+import { createInterface } from 'readline';
+
+import { Agent, Runner } from '../../src/agents';
+
+/**
+ * This demonstrates usage of the `previous_response_id` parameter to continue a conversation.
+ * The second run passes the previous response ID to the model, which allows it to continue the
+ * conversation without re-sending the previous messages.
+ *
+ * Notes:
+ * 1. This only applies to the OpenAI Responses API. Other models will ignore this parameter.
+ * 2. Responses are only stored for 30 days as of this writing, so in production you should
+ *    store the response ID along with an expiration date; if the response is no longer valid,
+ *    you'll need to re-send the previous conversation history.
+ */
+
+async function main() {
+  const agent = new Agent({
+    name: 'Assistant',
+    instructions: 'You are a helpful assistant. Be VERY concise.',
+  });
+
+  const result = await Runner.run(
+    agent,
+    'What is the largest country in South America?'
+  );
+  console.log(result.finalOutput);
+  // Brazil
+  console.log('first message response_id:', result.lastResponseId);
+
+  const followupResult = await Runner.run(
+    agent,
+    'What is the capital of that country?',
+    { previousResponseId: result.lastResponseId! }
+  );
+  console.log(followupResult.finalOutput);
+  // Brasilia
+  console.log('second message response_id:', followupResult.lastResponseId);
+}
+
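+/**
+ * The same two-turn flow as `main()`, but streamed. `lastResponseId` is read
+ * only after the first stream has fully completed, since the response ID is
+ * not available until the model response has finished.
+ */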
+async function mainStream() {
+  const agent = new Agent({
+    name: 'Assistant',
+    instructions: 'You are a helpful assistant. Be VERY concise.',
+    tools: [],
+  });
+
+  const result = Runner.runStreamed(
+    agent,
+    'What is the largest country in South America?'
+  );
+
+  for await (const event of result.streamEvents()) {
+    if (
+      event.type === 'raw_response_event' &&
+      event.data.type === 'response.output_text.delta'
+    ) {
+      process.stdout.write(event.data.delta);
+    }
+  }
+  console.log('\n---');
+  console.log('first message response_id:', result.lastResponseId);
+
+  const followupResult = Runner.runStreamed(
+    agent,
+    'What is the capital of that country?',
+    { previousResponseId: result.lastResponseId! }
+  );
+
+  for await (const event of followupResult.streamEvents()) {
+    if (
+      event.type === 'raw_response_event' &&
+      event.data.type === 'response.output_text.delta'
+    ) {
+      process.stdout.write(event.data.delta);
+    }
+  }
+
+  console.log('\n---');
+  console.log('second message response_id:', followupResult.lastResponseId);
+}
+
+// Ask the user whether to run in stream mode.
+const rl = createInterface({
+  input: process.stdin,
+  output: process.stdout,
+});
+
+rl.question('Run in stream mode? (y/n): ', (answer: string) => {
+  rl.close();
+  if (answer.toLowerCase() === 'y') {
+    mainStream().catch(console.error);
+  } else {
+    main().catch(console.error);
+  }
+});
diff --git a/package.json b/package.json
index d9b4cc3..4cdd95d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "openai-agents-js",
-  "version": "0.1.2",
+  "version": "0.1.4",
   "description": "An unofficial Node.js library for building AI agents and multi-agent workflows with OpenAI",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
diff --git a/src/agents/agent/index.ts b/src/agents/agent/index.ts
index 92ba705..c215216 100644
--- a/src/agents/agent/index.ts
+++ b/src/agents/agent/index.ts
@@ -74,7 +74,7 @@ interface AgentProps {
   /** Model-specific tuning parameters */
   model_settings?: ModelSettings;
   /** A list of tools that the agent can use */
-  tools: Array<Tool>;
+  tools?: Array<Tool>;
   /** Model Context Protocol servers the agent can use */
   mcp_servers?: any; // TODO: Implement `MCPServer`. Then uncomment: Array<MCPServer>
   /** Checks that run before generating a response */
@@ -126,7 +126,7 @@ export class Agent {
   model_settings: ModelSettings = new ModelSettings();
 
   /** A list of tools that the agent can use */
-  tools: Array<Tool> = [];
+  tools?: Array<Tool> = [];
 
   /**
    * A list of Model Context Protocol servers that the agent can use.
@@ -188,7 +188,7 @@
     this.handoffs = handoffs;
     this.model = model ?? DEFAULT_MODEL;
     this.model_settings = model_settings ?? new ModelSettings();
-    this.tools = tools;
+    this.tools = tools || [];
     this.mcp_servers = mcp_servers;
     this.input_guardrails = input_guardrails ?? [];
     this.output_guardrails = output_guardrails ?? [];
@@ -229,7 +229,7 @@
    */
   async getAllTools(): Promise<Tool[]> {
     const mcpTools = await this.getMCPTools();
-    return [...mcpTools, ...this.tools];
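+    // `tools` is optional as of this change and getMCPTools() may return
+    // nothing, so default both sides to [] before spreading.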
+    return [...(mcpTools ?? []), ...(this.tools ?? [])];
   }
 
   /**
diff --git a/src/agents/items/index.ts b/src/agents/items/index.ts
index 38c4117..dbb9da6 100644
--- a/src/agents/items/index.ts
+++ b/src/agents/items/index.ts
@@ -239,16 +239,16 @@ export class ModelResponse {
    * An ID for the response which can be used to refer to the response in subsequent calls to the
    * model. Not supported by all model providers.
    */
-  referenceable_id: string | null;
+  response_id: string | null;
 
   constructor(
     output: TResponseOutputItem[],
     usage: Usage,
-    referenceable_id: string | null = null
+    response_id: string | null = null
   ) {
     this.output = output;
     this.usage = usage;
-    this.referenceable_id = referenceable_id;
+    this.response_id = response_id;
   }
 
   /**
diff --git a/src/agents/models/interface.ts b/src/agents/models/interface.ts
index e70aa96..aeea3ce 100644
--- a/src/agents/models/interface.ts
+++ b/src/agents/models/interface.ts
@@ -71,7 +71,8 @@ export abstract class Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse>;
 
   /**
@@ -93,7 +94,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncIterableIterator<TResponseStreamEvent>;
 }
diff --git a/src/agents/models/openai-chatcompletions.ts b/src/agents/models/openai-chatcompletions.ts
index acf55c1..94120ee 100644
--- a/src/agents/models/openai-chatcompletions.ts
+++ b/src/agents/models/openai-chatcompletions.ts
@@ -87,7 +87,8 @@ export class OpenAIChatCompletionsModel implements Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse> {
     const convertedInput =
       typeof input === 'string'
@@ -160,7 +161,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncGenerator<TResponseStreamEvent> {
     const convertedInput =
       typeof input === 'string'
diff --git a/src/agents/models/openai-responses.ts b/src/agents/models/openai-responses.ts
index 11aee62..9310257 100644
--- a/src/agents/models/openai-responses.ts
+++ b/src/agents/models/openai-responses.ts
@@ -214,7 +214,8 @@ export class OpenAIResponsesModel implements Model {
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): Promise<ModelResponse> {
     try {
       const response = await this.fetchResponse(
@@ -224,7 +225,8 @@
         tools,
         outputSchema,
         handoffs,
-        false
+        false,
+        previousResponseId
       );
 
       if (!('usage' in response) || !response.usage) {
@@ -268,7 +270,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    tracing: ModelTracing
+    tracing: ModelTracing,
+    previousResponseId?: string
   ): AsyncIterableIterator<TResponseStreamEvent> {
     const stream = await this.fetchResponse(
       systemInstructions,
@@ -277,7 +280,8 @@
       tools,
       outputSchema,
       handoffs,
-      true
+      true,
+      previousResponseId
     );
 
     if (!(stream instanceof Stream)) {
@@ -296,7 +300,8 @@
     tools: Tool[],
     outputSchema: AgentOutputSchema | null,
     handoffs: Handoff[],
-    stream: boolean
+    stream: boolean,
+    previousResponseId?: string
   ): Promise<Response | Stream<ResponseStreamEvent>> {
     const listInput = Array.isArray(input)
       ? input
@@ -337,6 +342,7 @@
       text: responseFormat,
       store: modelSettings.store ?? undefined,
       reasoning: modelSettings.reasoning ?? null,
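+      // Continue from the server-stored previous response (OpenAI Responses
+      // API only); omit the field entirely when no previous ID was provided.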
+      previous_response_id: previousResponseId ?? undefined,
     };
 
     // console.log('----PARAMA SOKARIm\n\n', params, '\n\n----');
diff --git a/src/agents/result.ts b/src/agents/result.ts
index 1ed7374..6c832fa 100644
--- a/src/agents/result.ts
+++ b/src/agents/result.ts
@@ -112,6 +112,10 @@ export abstract class RunResultBase {
 
     return [...originalItems, ...newItems];
   }
+
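+  /**
+   * The response ID of the last model response, if the provider returned one.
+   * Pass it as `previousResponseId` on a subsequent run to continue the
+   * conversation without re-sending the prior messages (Responses API only).
+   */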
+  get lastResponseId(): string | null {
+    return this.rawResponses[this.rawResponses.length - 1]?.response_id ?? null;
+  }
 }
 
 /**
diff --git a/src/agents/run.ts b/src/agents/run.ts
index 08baddc..70a86b8 100644
--- a/src/agents/run.ts
+++ b/src/agents/run.ts
@@ -116,20 +116,20 @@ export class RunConfig {
   /**
    * Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
    */
-  tracingDisabled: boolean = false;
+  tracingDisabled?: boolean = false;
 
   /**
    * Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
    * LLM generations) in traces. If False, we'll still create spans for these events, but the
    * sensitive data will not be included.
    */
-  traceIncludeSensitiveData: boolean = true;
+  traceIncludeSensitiveData?: boolean = true;
 
   /**
    * The name of the run, used for tracing. Should be a logical name for the run, like
    * "Code generation workflow" or "Customer support agent".
    */
-  workflowName: string = 'Agent workflow';
+  workflowName?: string = 'Agent workflow';
 
   /**
    * A custom trace ID to use for tracing. If not provided, we will generate a new trace ID.
@@ -175,6 +175,7 @@ export class Runner {
       maxTurns?: number;
       hooks?: RunHooks;
       runConfig?: RunConfig;
+      previousResponseId?: string;
     } = {}
   ): Promise<RunResult> {
     const {
@@ -191,7 +192,7 @@
     const newTrace = currentTrace
       ? null
       : trace(
-          runConfig.workflowName,
+          runConfig.workflowName ?? 'Agent workflow',
           runConfig.traceId,
           runConfig.groupId,
           runConfig.traceMetadata,
@@ -286,6 +287,7 @@
           runConfig,
           shouldRunAgentStartHooks,
           toolUseTracker,
+          previousResponseId: options.previousResponseId,
         });
       } else {
         // Fetch tools for subsequent turns if agent changed (span is new)
@@ -306,6 +308,7 @@
           runConfig,
           shouldRunAgentStartHooks,
           toolUseTracker,
+          previousResponseId: options.previousResponseId,
         });
       }
       shouldRunAgentStartHooks = false;
@@ -444,6 +447,7 @@
       maxTurns?: number;
       hooks?: RunHooks;
      runConfig?: RunConfig;
+      previousResponseId?: string;
     } = {}
   ): RunResultStreaming {
     const {
@@ -458,7 +462,7 @@
     const newTrace = currentTrace
       ? null
       : trace(
-          runConfig.workflowName,
+          runConfig.workflowName ?? 'Agent workflow',
           runConfig.traceId,
           runConfig.groupId,
           runConfig.traceMetadata,
@@ -495,7 +499,8 @@
       maxTurns,
       hooks,
       contextWrapper,
-      runConfig
+      runConfig,
+      options.previousResponseId
     );
 
     // Set the task on the streamed result
@@ -717,6 +722,7 @@
     runConfig,
     shouldRunAgentStartHooks,
     toolUseTracker,
+    previousResponseId,
   }: {
     agent: Agent;
     allTools: Tool[];
@@ -727,6 +733,7 @@
     runConfig: RunConfig;
     shouldRunAgentStartHooks: boolean;
     toolUseTracker: AgentToolUseTracker;
+    previousResponseId?: string;
   }): Promise<SingleStepResult> {
     // Ensure we run the hooks before anything else
     if (shouldRunAgentStartHooks) {
@@ -753,7 +760,8 @@
       handoffs,
       contextWrapper,
       runConfig,
-      toolUseTracker
+      toolUseTracker,
+      previousResponseId
     );
 
     contextWrapper.usage.add(newResponse.usage);
@@ -786,7 +794,8 @@
     handoffs: Handoff[],
     contextWrapper: RunContextWrapper,
     runConfig: RunConfig,
-    toolUseTracker: AgentToolUseTracker
+    toolUseTracker: AgentToolUseTracker,
+    previousResponseId?: string
   ): Promise<ModelResponse> {
     const model = Runner._getModel(agent, runConfig);
     const modelSettings = agent.model_settings.resolve(runConfig.modelSettings);
@@ -814,9 +823,10 @@
       outputSchema,
       handoffs,
       getModelTracingImpl(
-        runConfig.tracingDisabled,
-        runConfig.traceIncludeSensitiveData
-      )
+        runConfig.tracingDisabled ?? false,
+        runConfig.traceIncludeSensitiveData ?? true
+      ),
+      previousResponseId
     );
 
     contextWrapper.usage.add(newResponse.usage);
@@ -890,7 +900,8 @@
     maxTurns: number,
     hooks: RunHooks,
     contextWrapper: RunContextWrapper,
-    runConfig: RunConfig
+    runConfig: RunConfig,
+    previousResponseId?: string
   ): Promise<void> {
     const toolUseTracker = new AgentToolUseTracker();
     let currentAgent = startingAgent;
@@ -916,7 +927,6 @@
     try {
       await streamedResult._input_guardrails_task; // Wait for guardrails before starting turns
     } catch (e) {
-      console.log(' OROSPO CUCU PATLMAA', e);
       if (e instanceof InputGuardrailTripwireTriggered) {
         // Error already logged and attached to span in _runInputGuardrails
         await streamedResult.setError(e); // Propagate error to stream consumer
@@ -996,7 +1006,8 @@
           toolUseTracker,
           await Runner._getAllTools(currentAgent), // Pass current tools
           currentOriginalInput, // Pass input potentially modified by handoffs
-          currentGeneratedItems // Pass items accumulated so far
+          currentGeneratedItems, // Pass items accumulated so far
+          previousResponseId
         );
         shouldRunAgentStartHooks = false;
@@ -1261,7 +1272,8 @@
     toolUseTracker: AgentToolUseTracker,
     allTools: Tool[],
     originalInput: string | TResponseInputItem[], // Receive current input state
-    preStepItems: RunItem[] // Receive items generated before this turn
+    preStepItems: RunItem[], // Receive items generated before this turn
+    previousResponseId?: string
   ): Promise<SingleStepResult> {
     // Returns the result after streaming completes
     if (shouldRunAgentStartHooks) {
@@ -1321,9 +1333,10 @@
       outputSchema,
       handoffs,
       getModelTracingImpl(
-        runConfig.tracingDisabled,
-        runConfig.traceIncludeSensitiveData
-      )
+        runConfig.tracingDisabled ?? false,
+        runConfig.traceIncludeSensitiveData ?? true
+      ),
+      previousResponseId
     );
 
     let partialText = '';