import { performance } from "node:perf_hooks";

import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import {
  buildToolLogMessageData,
  runToolAwareOpenAIChat,
  type ToolExecutionEvent,
} from "./chat-tools.js";
import type { MultiplexRequest, Provider } from "./types.js";

export type StreamEvent =
  | { type: "meta"; chatId: string; callId: string; provider: Provider; model: string }
  | { type: "tool_call"; event: ToolExecutionEvent }
  | { type: "delta"; text: string }
  | {
      type: "done";
      text: string;
      usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number };
    }
  | { type: "error"; message: string };

function getChatIdOrCreate(chatId?: string) {
  if (chatId) return Promise.resolve(chatId);
  return prisma.chat.create({ data: {}, select: { id: true } }).then((c) => c.id);
}

export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator<StreamEvent> {
  const t0 = performance.now();
  const chatId = await getChatIdOrCreate(req.chatId);

  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: req.provider as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true },
  });

  await prisma.$transaction([
    prisma.chat.update({
      where: { id: chatId },
      data: {
        lastUsedProvider: req.provider as any,
        lastUsedModel: req.model,
      },
    }),
    // Only the first call on a chat sets the initiating provider/model; the
    // null filter makes this updateMany a no-op on every later call.
    prisma.chat.updateMany({
      where: { id: chatId, initiatedProvider: null },
      data: {
        initiatedProvider: req.provider as any,
        initiatedModel: req.model,
      },
    }),
  ]);

  yield { type: "meta", chatId, callId: call.id, provider: req.provider, model: req.model };

  let text = "";
  let usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number } | undefined;
  let raw: unknown = { streamed: true };
  let toolMessages: ReturnType<typeof buildToolLogMessageData>[] = [];

  try {
    if (req.provider === "openai" || req.provider === "xai") {
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const toolEvents: ToolExecutionEvent[] = [];
      const r = await runToolAwareOpenAIChat({
        client,
        model: req.model,
        messages: req.messages,
        temperature: req.temperature,
        maxTokens: req.maxTokens,
        onToolEvent: (event) => {
          toolEvents.push(event);
        },
        logContext: {
          provider: req.provider,
          model: req.model,
          chatId,
        },
      });
      raw = r.raw;
      text = r.text;
      usage = r.usage;
      toolMessages = toolEvents.map((event) => buildToolLogMessageData(chatId, event));
      for (const event of toolEvents) {
        yield { type: "tool_call", event };
      }
      if (text) {
        yield { type: "delta", text };
      }
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const stream = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
        stream: true,
      });
      for await (const ev of stream as any as AsyncIterable<any>) {
        // Anthropic streaming events include content_block_delta with text_delta
        if (ev?.type === "content_block_delta" && ev?.delta?.type === "text_delta") {
          const delta = ev.delta.text ?? "";
          if (delta) {
            text += delta;
            yield { type: "delta", text: delta };
          }
        }
        // capture usage if present on message_delta
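        // Assumption about the stream shape: in Anthropic's documented
        // streaming protocol, message_delta.usage typically carries only the
        // cumulative output_tokens, while input_tokens arrives on the initial
        // message_start event, so inputTokens below may come back undefined.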
        if (ev?.type === "message_delta" && ev?.usage) {
          usage = {
            inputTokens: ev.usage.input_tokens,
            outputTokens: ev.usage.output_tokens,
            totalTokens: (ev.usage.input_tokens ?? 0) + (ev.usage.output_tokens ?? 0),
          };
        }
        // some streams end with message_stop
      }
      raw = { streamed: true, provider: "anthropic" };
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }

    const latencyMs = Math.round(performance.now() - t0);
    await prisma.$transaction(async (tx) => {
      if (toolMessages.length) {
        await tx.message.createMany({
          data: toolMessages.map((message) => ({
            chatId: message.chatId,
            role: message.role as any,
            content: message.content,
            name: message.name,
            metadata: message.metadata as any,
          })),
        });
      }
      await tx.message.create({
        data: { chatId, role: "assistant" as any, content: text },
      });
      await tx.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      });
    });

    yield { type: "done", text, usage };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    // Persist the failure on the call record before surfacing it to the client.
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    yield { type: "error", message: e?.message ?? String(e) };
  }
}
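
// Usage sketch (illustrative only, not part of the original module): one way
// a caller might drain the generator into a line-oriented sink such as an SSE
// response. `pipeMultiplexStream` and `writeLine` are hypothetical names;
// `writeLine` stands in for something like `res.write` in an HTTP handler.
export async function pipeMultiplexStream(
  req: MultiplexRequest,
  writeLine: (line: string) => void,
): Promise<void> {
  for await (const event of runMultiplexStream(req)) {
    // Frame each event as an SSE `data:` record. No explicit break is needed:
    // the generator returns on its own after a terminal "done" or "error".
    writeLine(`data: ${JSON.stringify(event)}\n\n`);
  }
}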