"use strict";
|
|
/**
|
|
* Copyright (c) Microsoft Corporation.
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
exports.OpenAI = void 0;
|
|
const fetchWithTimeout_1 = require("../fetchWithTimeout");
|
|
const types_1 = require("../types");
|
|
/**
 * Provider adapter for the OpenAI Responses API.
 * Exposes the module-level `complete` round-trip behind a named provider object.
 */
class OpenAI {
    name = 'openai';
    /**
     * Runs one completion round-trip against the OpenAI Responses API.
     * @param conversation messages, tools and system prompt to send
     * @param options model, credentials and transport options
     * @returns assistant message plus token usage
     */
    async complete(conversation, options) {
        const outcome = await complete(conversation, options);
        return outcome;
    }
}
exports.OpenAI = OpenAI;
|
|
/**
 * Sends one conversation turn to the OpenAI Responses API and converts the
 * reply back into the library's assistant-message shape.
 *
 * @param conversation messages, tools, and system prompt to send
 * @param options model, temperature, maxTokens, reasoning, API credentials
 * @returns {Promise<{result, usage}>} assistant message (with stopReason) and token usage
 */
async function complete(conversation, options) {
    const input = conversation.messages.flatMap(toResponseInputItems);
    const functionTools = conversation.tools.map(toOpenAIFunctionTool);
    const hasTools = conversation.tools.length > 0;
    const { response, error } = await create({
        model: options.model,
        temperature: options.temperature,
        input,
        instructions: systemPrompt(conversation.systemPrompt),
        tools: functionTools.length > 0 ? functionTools : undefined,
        tool_choice: hasTools ? 'auto' : undefined,
        parallel_tool_calls: false,
        max_output_tokens: options.maxTokens,
        reasoning: toOpenAIReasoning(options.reasoning),
    }, options);
    if (error || !response) {
        return {
            result: (0, types_1.assistantMessageFromError)(error ?? 'No response from OpenAI API'),
            usage: (0, types_1.emptyUsage)(),
        };
    }
    // Only max_output_tokens truncation maps onto a non-ok stop reason here.
    const truncated = response.incomplete_details?.reason === 'max_output_tokens';
    const stopReason = { code: truncated ? 'max_tokens' : 'ok' };
    const result = { role: 'assistant', content: [], stopReason };
    const usage = {
        input: response.usage?.input_tokens ?? 0,
        output: response.usage?.output_tokens ?? 0,
    };
    // A truncated response is returned with empty content but real usage.
    if (stopReason.code !== 'ok')
        return { result, usage };
    for (const outputItem of response.output) {
        if (outputItem.type === 'function_call') {
            result.content.push(toToolCall(outputItem));
            continue;
        }
        if (outputItem.type !== 'message' || outputItem.role !== 'assistant')
            continue;
        // Remember the provider-side ids so the message can be echoed back
        // verbatim on the next turn (see toResponseInputItems).
        result.openaiId = outputItem.id;
        result.openaiStatus = outputItem.status;
        for (const part of outputItem.content) {
            if (part.type === 'output_text')
                result.content.push({ type: 'text', text: part.text });
        }
    }
    return { result, usage };
}
|
|
/**
 * Issues one POST to the Responses endpoint and returns either the parsed
 * response body or an error string.
 *
 * @param createParams request body for the Responses API
 * @param options apiKey, apiEndpoint, apiTimeout, signal, debug logger
 * @returns {Promise<{response?: object, error?: string}>}
 */
async function create(createParams, options) {
    const headers = {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${options.apiKey}`,
    };
    // Tools are summarized as a count so debug logs stay readable.
    const debugBody = { ...createParams, tools: `${createParams.tools?.length ?? 0} tools` };
    options.debug?.('lowire:openai-responses')('Request:', JSON.stringify(debugBody, null, 2));
    const response = await (0, fetchWithTimeout_1.fetchWithTimeout)(options.apiEndpoint ?? `https://api.openai.com/v1/responses`, {
        method: 'POST',
        headers,
        body: JSON.stringify(createParams),
        signal: options.signal,
        timeout: options.apiTimeout
    });
    const responseText = await response.text();
    // Fix: JSON.parse previously ran unguarded before the try/catch, so a
    // non-JSON body (e.g. an HTML error page from a proxy or a truncated
    // payload) threw out of this function instead of reaching the intended
    // `{ error: responseText }` fallback. Parse defensively instead.
    let responseBody;
    try {
        responseBody = JSON.parse(responseText);
    }
    catch {
        return {
            error: response.ok
                ? `Invalid JSON response from OpenAI API: ${responseText}`
                : responseText,
        };
    }
    if (!response.ok)
        // Prefer the structured API error message; fall back to the raw body
        // when the error payload does not match the expected shape.
        return { error: responseBody?.error?.message ?? responseText };
    options.debug?.('lowire:openai-responses')('Response:', JSON.stringify(responseBody, null, 2));
    return { response: responseBody };
}
|
|
/**
 * Converts a tool-result content part into a Responses API input part.
 * Only 'text' and 'image' parts have an equivalent; anything else throws.
 *
 * @param part a tool-result content part ({type: 'text'|'image', ...})
 * @returns an input_text or input_image item
 * @throws {Error} when the part type cannot be represented
 */
function toResultContentPart(part) {
    switch (part.type) {
        case 'text':
            return { type: 'input_text', text: part.text };
        case 'image':
            // Images travel inline as a base64 data URL.
            return {
                type: 'input_image',
                image_url: `data:${part.mimeType};base64,${part.data}`,
                detail: 'auto',
            };
        default:
            throw new Error(`Cannot convert content part of type ${part.type} to response content part`);
    }
}
|
|
/**
 * Converts one library message into the Responses API input items it
 * contributes to the request. A user message is a single input item; an
 * assistant message may expand into an output message, a synthesized
 * user message carrying a tool error, and function_call/-output pairs.
 *
 * @param message a {role, content, ...} message from the conversation
 * @returns an array of Responses API input items
 * @throws {Error} for roles other than 'user' and 'assistant'
 */
function toResponseInputItems(message) {
    switch (message.role) {
        case 'user':
            return [{
                    type: 'message',
                    role: 'user',
                    content: message.content
                }];
        case 'assistant': {
            const items = [];
            const textParts = message.content.filter((part) => part.type === 'text');
            const toolCallParts = message.content.filter((part) => part.type === 'tool_call');
            if (textParts.length > 0) {
                // Echo the provider's own ids/status back so the API can
                // correlate this item with its previous output.
                items.push({
                    id: message.openaiId,
                    status: message.openaiStatus,
                    type: 'message',
                    role: 'assistant',
                    content: textParts.map((part) => ({
                        type: 'output_text',
                        text: part.text,
                        annotations: [],
                        logprobs: []
                    })),
                });
            }
            if (message.toolError) {
                // Surface the tool failure to the model as a user message.
                items.push({
                    type: 'message',
                    role: 'user',
                    content: message.toolError
                });
            }
            items.push(...toolCallParts.flatMap(toFunctionToolCall));
            return items;
        }
        default:
            throw new Error(`Unsupported message role: ${message.role}`);
    }
}
|
|
/**
 * Maps a library tool definition to an OpenAI Responses API function tool.
 *
 * @param tool {name, description?, inputSchema}
 * @returns a {type: 'function', ...} tool declaration
 */
function toOpenAIFunctionTool(tool) {
    const { name, description, inputSchema } = tool;
    return {
        type: 'function',
        name,
        // The API expects an explicit null rather than a missing field.
        description: description ?? null,
        parameters: inputSchema,
        strict: null,
    };
}
|
|
/**
 * Expands one library tool_call part into its Responses API items: the
 * function_call itself, plus a function_call_output when a result exists.
 *
 * @param toolCall {id, name, arguments, openaiId, openaiStatus, result?}
 * @returns one- or two-element array of input items
 */
function toFunctionToolCall(toolCall) {
    const call = {
        type: 'function_call',
        call_id: toolCall.id,
        name: toolCall.name,
        arguments: JSON.stringify(toolCall.arguments),
        id: toolCall.openaiId,
        status: toolCall.openaiStatus,
    };
    if (!toolCall.result)
        return [call];
    return [
        call,
        {
            type: 'function_call_output',
            // call_id ties the output back to the call above.
            call_id: toolCall.id,
            output: toolCall.result.content.map(toResultContentPart),
        },
    ];
}
|
|
/**
 * Converts a Responses API function_call output item into the library's
 * tool_call content part, parsing the JSON-encoded arguments string.
 *
 * @param functionCall {name, arguments, call_id, id, status}
 * @returns a {type: 'tool_call', ...} content part
 */
function toToolCall(functionCall) {
    const { name, arguments: rawArguments, call_id, id, status } = functionCall;
    return {
        type: 'tool_call',
        name,
        arguments: JSON.parse(rawArguments),
        // call_id is our stable correlation id; id/status are provider-side.
        id: call_id,
        openaiId: id,
        openaiStatus: status,
    };
}
|
|
/**
 * Maps the library reasoning level onto an OpenAI reasoning config.
 * Unrecognized or absent values yield undefined, which omits the field
 * from the request entirely.
 *
 * @param reasoning 'none' | 'medium' | 'high' | anything else
 * @returns {{effort: string} | undefined}
 */
function toOpenAIReasoning(reasoning) {
    const knownEfforts = ['none', 'medium', 'high'];
    return knownEfforts.includes(reasoning) ? { effort: reasoning } : undefined;
}
|
|
const systemPrompt = (prompt) => `
|
|
### System instructions
|
|
|
|
${prompt}
|
|
|
|
### Tool calling instructions
|
|
- Make sure every message contains a tool call.
|
|
- When you use a tool, you may provide a brief thought or explanation in the content field
|
|
immediately before the tool_call. Do not split this into separate messages.
|
|
- Every reply must include a tool call.
|
|
`;
|