initial commit: add server

This commit is contained in:
2026-02-13 22:43:55 -08:00
commit 5faa57a741
16 changed files with 2882 additions and 0 deletions

View File

@@ -0,0 +1,124 @@
import { performance } from "node:perf_hooks";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, MultiplexResponse, Provider } from "./types.js";
/**
 * Identity conversion from our Provider union to the Prisma enum value.
 * Prisma's enum members use exactly these strings, so no mapping is needed.
 */
function asProviderEnum(p: Provider) {
  const prismaValue = p;
  return prismaValue;
}
/**
 * Execute one non-streaming LLM call against the requested provider and
 * persist the full round trip (request, response, latency, token usage)
 * via Prisma.
 *
 * A new chat row is created when `req.chatId` is absent. On provider
 * failure the error text and latency are attached to the llmCall row and
 * the original error is rethrown.
 *
 * @param req normalized request (provider, model, messages, sampling opts)
 * @returns the assistant message plus usage and the raw SDK response
 * @throws whatever the provider SDK threw, or Error for an unknown provider
 */
export async function runMultiplex(req: MultiplexRequest): Promise<MultiplexResponse> {
  const t0 = performance.now();
  // Persist call record early so we can attach errors.
  const call = await prisma.llmCall.create({
    data: {
      chatId: req.chatId ?? (await prisma.chat.create({ data: {} })).id,
      provider: asProviderEnum(req.provider) as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true, chatId: true },
  });
  try {
    let outText = "";
    let usage: MultiplexResponse["usage"] | undefined;
    let raw: unknown;
    if (req.provider === "openai" || req.provider === "xai") {
      // xAI exposes an OpenAI-compatible API, so both share this code path.
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const r = await client.chat.completions.create({
        model: req.model,
        // OpenAI SDK has very specific message union types; our normalized schema is compatible.
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
      });
      raw = r;
      outText = r.choices?.[0]?.message?.content ?? "";
      usage = r.usage
        ? {
            inputTokens: r.usage.prompt_tokens,
            outputTokens: r.usage.completion_tokens,
            totalTokens: r.usage.total_tokens,
          }
        : undefined;
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      // Anthropic splits system prompt. We'll convert first system message into system string.
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const r = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024, // Anthropic requires max_tokens; default if caller omitted it
        temperature: req.temperature,
        messages: msgs as any,
      });
      raw = r;
      // Concatenate only the text blocks; any non-text block contributes "".
      outText = r.content
        .map((c: any) => (c.type === "text" ? c.text : ""))
        .join("")
        .trim();
      // Anthropic usage (SDK typing varies by version)
      const ru: any = (r as any).usage;
      if (ru) {
        usage = {
          inputTokens: ru.input_tokens,
          outputTokens: ru.output_tokens,
          totalTokens: (ru.input_tokens ?? 0) + (ru.output_tokens ?? 0),
        };
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }
    const latencyMs = Math.round(performance.now() - t0);
    // Store assistant message + call record atomically.
    await prisma.$transaction([
      prisma.message.create({
        data: {
          chatId: call.chatId,
          role: "assistant" as any,
          content: outText,
        },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);
    return {
      provider: req.provider,
      model: req.model,
      message: { role: "assistant", content: outText },
      usage,
      raw,
    };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    // Best-effort failure record: a DB error here must never mask the
    // original provider error that callers need to see.
    try {
      await prisma.llmCall.update({
        where: { id: call.id },
        data: {
          error: e?.message ?? String(e),
          latencyMs,
        },
      });
    } catch {
      // swallow — the original error is rethrown below
    }
    throw e;
  }
}

View File

@@ -0,0 +1,19 @@
import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk";
import { env } from "../env.js";
/** Construct an OpenAI client from the environment; fails fast if the key is unset. */
export function openaiClient() {
  const apiKey = env.OPENAI_API_KEY;
  if (!apiKey) {
    throw new Error("OPENAI_API_KEY not set");
  }
  return new OpenAI({ apiKey });
}
// xAI (Grok) is OpenAI-compatible at https://api.x.ai/v1
/** Construct an OpenAI-SDK client pointed at xAI's endpoint; fails fast if the key is unset. */
export function xaiClient() {
  const apiKey = env.XAI_API_KEY;
  if (!apiKey) {
    throw new Error("XAI_API_KEY not set");
  }
  return new OpenAI({ apiKey, baseURL: "https://api.x.ai/v1" });
}
/** Construct an Anthropic client from the environment; fails fast if the key is unset. */
export function anthropicClient() {
  const apiKey = env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    throw new Error("ANTHROPIC_API_KEY not set");
  }
  return new Anthropic({ apiKey });
}

130
server/src/llm/streaming.ts Normal file
View File

@@ -0,0 +1,130 @@
import { performance } from "node:perf_hooks";
import type OpenAI from "openai";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, Provider } from "./types.js";
/**
 * Events yielded by runMultiplexStream: a "meta" event with the persisted
 * ids first, then zero or more "delta" text chunks, ending with either
 * "done" (full text plus usage when the provider reported it) or "error".
 */
export type StreamEvent =
| { type: "meta"; chatId: string; callId: string; provider: Provider; model: string }
| { type: "delta"; text: string }
| { type: "done"; text: string; usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number } }
| { type: "error"; message: string };
/**
 * Resolve the chat id for a call: reuse the caller-supplied id when present,
 * otherwise create a fresh (empty) chat row and return its id.
 *
 * Rewritten as async/await (the original mixed a `.then` chain into an
 * otherwise async/await module) and given an explicit return type.
 */
async function getChatIdOrCreate(chatId?: string): Promise<string> {
  if (chatId) return chatId;
  const created = await prisma.chat.create({ data: {}, select: { id: true } });
  return created.id;
}
/**
 * Streaming variant of runMultiplex: yields StreamEvents as provider tokens
 * arrive, then persists the assembled assistant message plus latency and
 * (when reported) token usage.
 *
 * Event order: one "meta" (persisted ids), zero or more "delta" chunks,
 * then exactly one "done" (success) or "error" (failure).
 */
export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator<StreamEvent> {
  const t0 = performance.now();
  const chatId = await getChatIdOrCreate(req.chatId);
  // Persist the call record early so errors can be attached to it.
  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: req.provider as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true },
  });
  yield { type: "meta", chatId, callId: call.id, provider: req.provider, model: req.model };
  let text = "";
  // Fixed: was `StreamEvent extends any ? any : never`, a nonsense conditional
  // type that resolved to `any`. This is the actual shape the code assigns.
  let usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number } | undefined;
  let raw: unknown = { streamed: true };
  try {
    if (req.provider === "openai" || req.provider === "xai") {
      // xAI exposes an OpenAI-compatible API, so both share this code path.
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const stream = await client.chat.completions.create({
        model: req.model,
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
        stream: true,
      });
      for await (const chunk of stream as any as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>) {
        const delta = chunk.choices?.[0]?.delta?.content ?? "";
        if (delta) {
          text += delta;
          yield { type: "delta", text: delta };
        }
      }
      // no guaranteed usage in stream mode across providers; leave empty for now
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      // Anthropic splits the system prompt out; lift the first system message.
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const stream = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024, // Anthropic requires max_tokens
        temperature: req.temperature,
        messages: msgs as any,
        stream: true,
      });
      for await (const ev of stream as any as AsyncIterable<any>) {
        // Anthropic streaming events include content_block_delta with text_delta
        if (ev?.type === "content_block_delta" && ev?.delta?.type === "text_delta") {
          const delta = ev.delta.text ?? "";
          if (delta) {
            text += delta;
            yield { type: "delta", text: delta };
          }
        }
        // capture usage if present on message_delta
        if (ev?.type === "message_delta" && ev?.usage) {
          usage = {
            inputTokens: ev.usage.input_tokens,
            outputTokens: ev.usage.output_tokens,
            totalTokens: (ev.usage.input_tokens ?? 0) + (ev.usage.output_tokens ?? 0),
          };
        }
        // some streams end with message_stop
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }
    const latencyMs = Math.round(performance.now() - t0);
    // Store assembled assistant message + call record atomically.
    await prisma.$transaction([
      prisma.message.create({
        data: { chatId, role: "assistant" as any, content: text },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);
    yield { type: "done", text, usage };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    // Best-effort failure record: a DB error here must not prevent the
    // consumer from receiving the original error event below.
    try {
      await prisma.llmCall.update({
        where: { id: call.id },
        data: {
          error: e?.message ?? String(e),
          latencyMs,
        },
      });
    } catch {
      // ignore — still emit the original error
    }
    yield { type: "error", message: e?.message ?? String(e) };
  }
}

28
server/src/llm/types.ts Normal file
View File

@@ -0,0 +1,28 @@
/** LLM backends supported by the multiplexer (xAI is OpenAI-compatible). */
export type Provider = "openai" | "anthropic" | "xai";
/** Normalized chat message shared across providers. */
export type ChatMessage = {
role: "system" | "user" | "assistant" | "tool";
content: string;
// Optional participant name; forwarded on the OpenAI/xAI path, unused by Anthropic.
name?: string;
};
/** Input to runMultiplex / runMultiplexStream. A new chat is created when chatId is omitted. */
export type MultiplexRequest = {
chatId?: string;
provider: Provider;
model: string;
messages: ChatMessage[];
// Sampling temperature, passed through to the provider as-is.
temperature?: number;
// Output token cap; the Anthropic path defaults to 1024 when omitted.
maxTokens?: number;
};
/** Result of a non-streaming multiplex call. */
export type MultiplexResponse = {
provider: Provider;
model: string;
message: { role: "assistant"; content: string };
// Token accounting as reported by the provider; absent when not reported.
usage?: {
inputTokens?: number;
outputTokens?: number;
totalTokens?: number;
};
// Raw provider SDK response (also persisted on the llmCall row).
raw: unknown;
};