// Sybil-2/server/src/llm/multiplexer.ts

import { performance } from "node:perf_hooks";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import { buildToolLogMessageData, runToolAwareOpenAIChat } from "./chat-tools.js";
import type { MultiplexRequest, MultiplexResponse, Provider } from "./types.js";
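
// For orientation: rough shapes of the multiplex types as inferred from their
// use in this file (the authoritative definitions live in ./types.js):
//
//   MultiplexRequest  ~ { provider: Provider; model: string; chatId?: string;
//                         messages: { role: string; content: string }[];
//                         temperature?: number; maxTokens?: number }
//   MultiplexResponse ~ { provider: Provider; model: string;
//                         message: { role: "assistant"; content: string };
//                         usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number };
//                         raw: unknown }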

function asProviderEnum(p: Provider) {
  // Prisma enum values match these strings.
  return p;
}

export async function runMultiplex(req: MultiplexRequest): Promise<MultiplexResponse> {
  const t0 = performance.now();
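  // Reuse the caller's chat when one is given; otherwise open a fresh chat row
  // so the call and its messages have somewhere to hang.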
  const chatId = req.chatId ?? (await prisma.chat.create({ data: {}, select: { id: true } })).id;
  // Persist call record early so we can attach errors.
  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: asProviderEnum(req.provider) as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true, chatId: true },
  });
  await prisma.$transaction([
    prisma.chat.update({
      where: { id: chatId },
      data: {
        lastUsedProvider: asProviderEnum(req.provider) as any,
        lastUsedModel: req.model,
      },
    }),
    prisma.chat.updateMany({
      where: { id: chatId, initiatedProvider: null },
      data: {
        initiatedProvider: asProviderEnum(req.provider) as any,
        initiatedModel: req.model,
      },
    }),
  ]);
  try {
    let outText = "";
    let usage: MultiplexResponse["usage"] | undefined;
    let raw: unknown;
    let toolMessages: ReturnType<typeof buildToolLogMessageData>[] = [];
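    // OpenAI and xAI speak the same chat-completions dialect, so they share the
    // tool-aware loop; Anthropic gets its own branch below.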
    if (req.provider === "openai" || req.provider === "xai") {
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const r = await runToolAwareOpenAIChat({
        client,
        model: req.model,
        messages: req.messages,
        temperature: req.temperature,
        maxTokens: req.maxTokens,
        logContext: {
          provider: req.provider,
          model: req.model,
          chatId,
        },
      });
      raw = r.raw;
      outText = r.text;
      usage = r.usage;
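      // Turn each tool-call event into message-row data for the transcript.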
      toolMessages = r.toolEvents.map((event) => buildToolLogMessageData(call.chatId, event));
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      // Anthropic takes the system prompt as a separate field, so lift the first
      // system message out into the `system` string.
      const system = req.messages.find((m) => m.role === "system")?.content;
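      // Note: any system messages after the first are dropped here, and every
      // non-assistant role is coerced to "user".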
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const r = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
      });
      raw = r;
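      // Anthropic returns an array of content blocks; keep only the text blocks
      // and concatenate them.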
      outText = r.content
        .map((c: any) => (c.type === "text" ? c.text : ""))
        .join("")
        .trim();
      // Anthropic usage (SDK typing varies by version).
      const ru: any = (r as any).usage;
      if (ru) {
        usage = {
          inputTokens: ru.input_tokens,
          outputTokens: ru.output_tokens,
          totalTokens: (ru.input_tokens ?? 0) + (ru.output_tokens ?? 0),
        };
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }
    const latencyMs = Math.round(performance.now() - t0);
    // Store tool activity (if any), assistant message, and call record.
    await prisma.$transaction(async (tx) => {
      if (toolMessages.length) {
        await tx.message.createMany({
          data: toolMessages.map((message) => ({
            chatId: message.chatId,
            role: message.role as any,
            content: message.content,
            name: message.name,
            metadata: message.metadata as any,
          })),
        });
      }
      await tx.message.create({
        data: {
          chatId: call.chatId,
          role: "assistant" as any,
          content: outText,
        },
      });
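      // Finalize the call record with the raw response, latency, and token usage.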
      await tx.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      });
    });
    return {
      provider: req.provider,
      model: req.model,
      message: { role: "assistant", content: outText },
      usage,
      raw,
    };
  } catch (e: any) {
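    // Record the failure and latency on the call row created above, then rethrow.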
    const latencyMs = Math.round(performance.now() - t0);
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    throw e;
  }
}
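
// Usage sketch (the model name here is illustrative, not something this module
// prescribes):
//
//   const res = await runMultiplex({
//     provider: "openai",
//     model: "gpt-4o-mini",
//     messages: [{ role: "user", content: "Hello" }],
//   });
//   console.log(res.message.content, res.usage?.totalTokens);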