OpenAI Responses API, tool call retries

This commit is contained in:
2026-05-02 21:44:32 -07:00
parent 8d6c069a33
commit 015253c0af
11 changed files with 369 additions and 40 deletions

View File

@@ -1,7 +1,12 @@
import { performance } from "node:perf_hooks";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import { buildToolLogMessageData, runToolAwareOpenAIChatStream, type ToolExecutionEvent } from "./chat-tools.js";
import {
buildToolLogMessageData,
runToolAwareChatCompletionsStream,
runToolAwareOpenAIChatStream,
type ToolExecutionEvent,
} from "./chat-tools.js";
import { buildAnthropicConversationMessage, getAnthropicSystemPrompt } from "./message-content.js";
import type { MultiplexRequest, Provider } from "./types.js";
@@ -58,18 +63,33 @@ export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator
try {
if (req.provider === "openai" || req.provider === "xai") {
const client = req.provider === "openai" ? openaiClient() : xaiClient();
for await (const ev of runToolAwareOpenAIChatStream({
client,
model: req.model,
messages: req.messages,
temperature: req.temperature,
maxTokens: req.maxTokens,
logContext: {
provider: req.provider,
model: req.model,
chatId,
},
})) {
const streamEvents =
req.provider === "openai"
? runToolAwareOpenAIChatStream({
client,
model: req.model,
messages: req.messages,
temperature: req.temperature,
maxTokens: req.maxTokens,
logContext: {
provider: req.provider,
model: req.model,
chatId,
},
})
: runToolAwareChatCompletionsStream({
client,
model: req.model,
messages: req.messages,
temperature: req.temperature,
maxTokens: req.maxTokens,
logContext: {
provider: req.provider,
model: req.model,
chatId,
},
});
for await (const ev of streamEvents) {
if (ev.type === "delta") {
text += ev.text;
yield { type: "delta", text: ev.text };