initial commit: add server

This commit is contained in:
2026-02-13 22:43:55 -08:00
commit 5faa57a741
16 changed files with 2882 additions and 0 deletions

22
server/src/auth.ts Normal file
View File

@@ -0,0 +1,22 @@
import type { FastifyRequest } from "fastify";
import { env } from "./env.js";
/**
 * Bearer-token gate for the personal backend's protected routes.
 *
 * Throws an Error carrying statusCode 401 (missing/malformed Authorization
 * header) or 403 (wrong token); the app-level error handler serializes these.
 * If ADMIN_TOKEN isn't set, runs in "open" mode (dev) and allows everything.
 */
export function requireAdmin(req: FastifyRequest) {
  if (!env.ADMIN_TOKEN) return;
  const auth = req.headers.authorization;
  if (!auth?.startsWith("Bearer ")) {
    const err = new Error("missing bearer token");
    // @ts-expect-error attach status
    err.statusCode = 401;
    throw err;
  }
  const token = auth.slice("Bearer ".length);
  // Compare in constant time so response latency cannot leak how many
  // leading characters of the supplied token were correct (the original
  // `!==` short-circuits at the first differing character).
  const expected = env.ADMIN_TOKEN;
  let mismatch = token.length ^ expected.length;
  for (let i = 0; i < expected.length; i++) {
    // charCodeAt past the end yields NaN, which bitwise ops coerce to 0;
    // the length XOR above already flags that case as a mismatch.
    mismatch |= expected.charCodeAt(i) ^ token.charCodeAt(i);
  }
  if (mismatch !== 0) {
    const err = new Error("invalid bearer token");
    // @ts-expect-error attach status
    err.statusCode = 403;
    throw err;
  }
}

16
server/src/db.ts Normal file
View File

@@ -0,0 +1,16 @@
import { PrismaClient } from "@prisma/client";
// Single shared PrismaClient for the whole process; every module imports
// this instance rather than constructing its own connection pool.
export const prisma = new PrismaClient({
  log: [
    // `emit: "event"` routes log lines through $on handlers below instead
    // of Prisma writing directly to stdout.
    { emit: "event", level: "error" },
    { emit: "event", level: "warn" },
  ],
});
// Forward Prisma's error/warn events to the console so they appear next to
// the Fastify logger output. (`any` because the $on payload typing differs
// across Prisma versions — TODO confirm against the installed version.)
prisma.$on("error", (e: any) => {
  console.error("[prisma:error]", e);
});
prisma.$on("warn", (e: any) => {
  console.warn("[prisma:warn]", e);
});

19
server/src/env.ts Normal file
View File

@@ -0,0 +1,19 @@
import { z } from "zod";
import "dotenv/config";
// Environment configuration, validated once at startup. An invalid value
// makes `EnvSchema.parse` throw, so the server fails fast on boot rather
// than at first use.
const EnvSchema = z.object({
  PORT: z.coerce.number().int().positive().default(8787),
  HOST: z.string().default("0.0.0.0"),
  // simple bearer-token auth for your personal backend
  // When unset, requireAdmin (auth.ts) runs in "open" mode and skips auth.
  ADMIN_TOKEN: z.string().min(20).optional(),
  // provider keys
  // Each key is optional; the matching client factory throws on use if absent.
  OPENAI_API_KEY: z.string().optional(),
  ANTHROPIC_API_KEY: z.string().optional(),
  XAI_API_KEY: z.string().optional(),
});
export type Env = z.infer<typeof EnvSchema>;
// Parsed eagerly at module load; import { env } everywhere else.
export const env: Env = EnvSchema.parse(process.env);

45
server/src/index.ts Normal file
View File

@@ -0,0 +1,45 @@
import Fastify from "fastify";
import cors from "@fastify/cors";
import swagger from "@fastify/swagger";
import swaggerUI from "@fastify/swagger-ui";
import sensible from "@fastify/sensible";
import { env } from "./env.js";
import { registerRoutes } from "./routes.js";
// Fastify app with pretty console logging for local development.
const app = Fastify({
  logger: {
    transport: {
      target: "pino-pretty",
      options: { colorize: true, translateTime: "SYS:standard" },
    },
  },
});
await app.register(cors, { origin: true, credentials: true });
await app.register(swagger, {
  openapi: {
    info: {
      title: "LLM Multiplexer Backend",
      version: "0.1.0",
    },
  },
});
await app.register(swaggerUI, { routePrefix: "/docs" });
await app.register(sensible);
// Uniform JSON error envelope. Route handlers validate with zod's
// `Body.parse`, which throws a ZodError carrying no `statusCode` — without
// the explicit check below, every malformed request body would surface as
// a 500 instead of a 400.
app.setErrorHandler((err, _req, reply) => {
  const e = err as any;
  const isValidation = e?.name === "ZodError" || Array.isArray(e?.issues);
  const statusCode = e.statusCode ?? (isValidation ? 400 : 500);
  reply.status(statusCode).send({
    error: true,
    message: e.message ?? String(e),
    statusCode,
    // Surface zod's structured issues so clients can see which field failed.
    ...(isValidation ? { issues: e.issues } : {}),
  });
});
await registerRoutes(app);
await app.listen({ port: env.PORT, host: env.HOST });
app.log.info(`listening on http://${env.HOST}:${env.PORT}`);

View File

@@ -0,0 +1,124 @@
import { performance } from "node:perf_hooks";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, MultiplexResponse, Provider } from "./types.js";
// Identity mapping: the Prisma enum values are spelled exactly like the
// Provider union ("openai" | "anthropic" | "xai"), so no translation is
// required — this exists only to mark the conversion site.
function asProviderEnum(provider: Provider) {
  return provider;
}
/**
 * Runs one chat completion against the requested provider and persists the
 * full round trip (request, raw response, latency, token usage) via Prisma.
 *
 * Flow: create an LlmCall row first (creating a Chat inline when req.chatId
 * is absent), call the provider SDK, then store the assistant message and
 * update the call row in a single transaction. On failure the error message
 * and latency are written to the call row and the error is rethrown.
 */
export async function runMultiplex(req: MultiplexRequest): Promise<MultiplexResponse> {
  const t0 = performance.now();
  // Persist call record early so we can attach errors.
  const call = await prisma.llmCall.create({
    data: {
      // NOTE(review): when no chatId is given, a fresh Chat is created here
      // but its id is only kept on the call row — it is not propagated back
      // to the HTTP caller (see routes.ts), so the new chat is unreachable.
      chatId: req.chatId ?? (await prisma.chat.create({ data: {} })).id,
      provider: asProviderEnum(req.provider) as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true, chatId: true },
  });
  try {
    let outText = "";
    let usage: MultiplexResponse["usage"] | undefined;
    let raw: unknown;
    if (req.provider === "openai" || req.provider === "xai") {
      // xAI exposes an OpenAI-compatible API, so both providers share this path.
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const r = await client.chat.completions.create({
        model: req.model,
        // OpenAI SDK has very specific message union types; our normalized schema is compatible.
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
      });
      raw = r;
      outText = r.choices?.[0]?.message?.content ?? "";
      usage = r.usage
        ? {
            inputTokens: r.usage.prompt_tokens,
            outputTokens: r.usage.completion_tokens,
            totalTokens: r.usage.total_tokens,
          }
        : undefined;
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      // Anthropic splits system prompt. We'll convert first system message into system string.
      const system = req.messages.find((m) => m.role === "system")?.content;
      // Remaining roles collapse to user/assistant ("tool" becomes "user").
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const r = await client.messages.create({
        model: req.model,
        system,
        // Anthropic requires max_tokens; 1024 is the fallback cap.
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
      });
      raw = r;
      // Concatenate only text content blocks; other block types are dropped.
      outText = r.content
        .map((c: any) => (c.type === "text" ? c.text : ""))
        .join("")
        .trim();
      // Anthropic usage (SDK typing varies by version)
      const ru: any = (r as any).usage;
      if (ru) {
        usage = {
          inputTokens: ru.input_tokens,
          outputTokens: ru.output_tokens,
          totalTokens: (ru.input_tokens ?? 0) + (ru.output_tokens ?? 0),
        };
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }
    const latencyMs = Math.round(performance.now() - t0);
    // Store assistant message + call record
    await prisma.$transaction([
      prisma.message.create({
        data: {
          chatId: call.chatId,
          role: "assistant" as any,
          content: outText,
        },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);
    return {
      provider: req.provider,
      model: req.model,
      message: { role: "assistant", content: outText },
      usage,
      raw,
    };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    // Best-effort bookkeeping: record the failure on the call row, then rethrow.
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    throw e;
  }
}

View File

@@ -0,0 +1,19 @@
import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk";
import { env } from "../env.js";
export function openaiClient() {
if (!env.OPENAI_API_KEY) throw new Error("OPENAI_API_KEY not set");
return new OpenAI({ apiKey: env.OPENAI_API_KEY });
}
// xAI (Grok) is OpenAI-compatible at https://api.x.ai/v1
export function xaiClient() {
if (!env.XAI_API_KEY) throw new Error("XAI_API_KEY not set");
return new OpenAI({ apiKey: env.XAI_API_KEY, baseURL: "https://api.x.ai/v1" });
}
export function anthropicClient() {
if (!env.ANTHROPIC_API_KEY) throw new Error("ANTHROPIC_API_KEY not set");
return new Anthropic({ apiKey: env.ANTHROPIC_API_KEY });
}

130
server/src/llm/streaming.ts Normal file
View File

@@ -0,0 +1,130 @@
import { performance } from "node:perf_hooks";
import type OpenAI from "openai";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, Provider } from "./types.js";
// Events emitted by runMultiplexStream, in order: exactly one "meta"
// (ids for the chat and call rows), zero or more "delta" text chunks,
// then a single terminal "done" (full text + optional usage) or "error".
export type StreamEvent =
  | { type: "meta"; chatId: string; callId: string; provider: Provider; model: string }
  | { type: "delta"; text: string }
  | { type: "done"; text: string; usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number } }
  | { type: "error"; message: string };
// Resolve the target chat: reuse the caller's id when given, otherwise
// create a fresh empty Chat row and return its generated id.
async function getChatIdOrCreate(chatId?: string): Promise<string> {
  if (chatId) return chatId;
  const created = await prisma.chat.create({ data: {}, select: { id: true } });
  return created.id;
}
/**
 * Streams one chat completion as an async generator of StreamEvents.
 *
 * Event order: one "meta" (chat/call ids), zero or more "delta" text chunks,
 * then exactly one terminal "done" or "error". The LlmCall row is created
 * up front so failures can be attached to it; on success the assembled
 * assistant message and latency/usage are stored in one transaction.
 * Errors are recorded on the call row and surfaced as an "error" event —
 * the generator never rethrows.
 */
export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator<StreamEvent> {
  const t0 = performance.now();
  const chatId = await getChatIdOrCreate(req.chatId);
  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: req.provider as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true },
  });
  yield { type: "meta", chatId, callId: call.id, provider: req.provider, model: req.model };
  let text = "";
  // Fixed: previously declared as `StreamEvent extends any ? any : never`,
  // a meaningless conditional type that resolved to `any`. Use the actual
  // usage shape (matching the "done" event payload).
  let usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number } | undefined;
  let raw: unknown = { streamed: true };
  try {
    if (req.provider === "openai" || req.provider === "xai") {
      // xAI is OpenAI-compatible, so both providers share this branch.
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const stream = await client.chat.completions.create({
        model: req.model,
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
        stream: true,
      });
      for await (const chunk of stream as any as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>) {
        const delta = chunk.choices?.[0]?.delta?.content ?? "";
        if (delta) {
          text += delta;
          yield { type: "delta", text: delta };
        }
      }
      // no guaranteed usage in stream mode across providers; leave empty for now
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();
      // Anthropic takes the system prompt separately; remaining roles are
      // folded down to user/assistant ("tool" becomes "user").
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));
      const stream = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
        stream: true,
      });
      for await (const ev of stream as any as AsyncIterable<any>) {
        // Anthropic streaming events include content_block_delta with text_delta
        if (ev?.type === "content_block_delta" && ev?.delta?.type === "text_delta") {
          const delta = ev.delta.text ?? "";
          if (delta) {
            text += delta;
            yield { type: "delta", text: delta };
          }
        }
        // capture usage if present on message_delta
        if (ev?.type === "message_delta" && ev?.usage) {
          usage = {
            inputTokens: ev.usage.input_tokens,
            outputTokens: ev.usage.output_tokens,
            totalTokens: (ev.usage.input_tokens ?? 0) + (ev.usage.output_tokens ?? 0),
          };
        }
        // some streams end with message_stop
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }
    const latencyMs = Math.round(performance.now() - t0);
    // Persist the assembled assistant message + call bookkeeping atomically.
    await prisma.$transaction([
      prisma.message.create({
        data: { chatId, role: "assistant" as any, content: text },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);
    yield { type: "done", text, usage };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    // Record the failure on the call row, then report it to the client.
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    yield { type: "error", message: e?.message ?? String(e) };
  }
}

28
server/src/llm/types.ts Normal file
View File

@@ -0,0 +1,28 @@
/** Providers the multiplexer can route requests to. */
export type Provider = "openai" | "anthropic" | "xai";
/** Normalized chat message shared across all provider adapters. */
export type ChatMessage = {
  role: "system" | "user" | "assistant" | "tool";
  content: string;
  // Optional participant name, forwarded only to OpenAI-style APIs.
  name?: string;
};
/** Input to runMultiplex / runMultiplexStream. */
export type MultiplexRequest = {
  // Existing chat to append to; when omitted a new Chat row is created.
  chatId?: string;
  provider: Provider;
  model: string;
  messages: ChatMessage[];
  temperature?: number;
  maxTokens?: number;
};
/** Normalized non-streaming completion result. */
export type MultiplexResponse = {
  provider: Provider;
  model: string;
  message: { role: "assistant"; content: string };
  // Token accounting; all fields optional because providers differ in
  // what (and whether) they report.
  usage?: {
    inputTokens?: number;
    outputTokens?: number;
    totalTokens?: number;
  };
  // Raw provider SDK response, kept for debugging/persistence.
  raw: unknown;
};

171
server/src/routes.ts Normal file
View File

@@ -0,0 +1,171 @@
import { z } from "zod";
import type { FastifyInstance } from "fastify";
import { prisma } from "./db.js";
import { requireAdmin } from "./auth.js";
import { runMultiplex } from "./llm/multiplexer.js";
import { runMultiplexStream } from "./llm/streaming.js";
export async function registerRoutes(app: FastifyInstance) {
app.get("/health", async () => ({ ok: true }));
app.get("/v1/chats", async (req) => {
requireAdmin(req);
const chats = await prisma.chat.findMany({
orderBy: { updatedAt: "desc" },
take: 100,
select: { id: true, title: true, createdAt: true, updatedAt: true },
});
return { chats };
});
app.post("/v1/chats", async (req) => {
requireAdmin(req);
const Body = z.object({ title: z.string().optional() });
const body = Body.parse(req.body ?? {});
const chat = await prisma.chat.create({ data: { title: body.title } });
return { chat };
});
app.get("/v1/chats/:chatId", async (req) => {
requireAdmin(req);
const Params = z.object({ chatId: z.string() });
const { chatId } = Params.parse(req.params);
const chat = await prisma.chat.findUnique({
where: { id: chatId },
include: { messages: { orderBy: { createdAt: "asc" } }, calls: { orderBy: { createdAt: "desc" } } },
});
if (!chat) return app.httpErrors.notFound("chat not found");
return { chat };
});
app.post("/v1/chats/:chatId/messages", async (req) => {
requireAdmin(req);
const Params = z.object({ chatId: z.string() });
const Body = z.object({
role: z.enum(["system", "user", "assistant", "tool"]),
content: z.string(),
name: z.string().optional(),
metadata: z.unknown().optional(),
});
const { chatId } = Params.parse(req.params);
const body = Body.parse(req.body);
const msg = await prisma.message.create({
data: {
chatId,
role: body.role as any,
content: body.content,
name: body.name,
metadata: body.metadata as any,
},
});
return { message: msg };
});
// Main: create a completion via provider+model and store everything.
app.post("/v1/chat-completions", async (req) => {
requireAdmin(req);
const Body = z.object({
chatId: z.string().optional(),
provider: z.enum(["openai", "anthropic", "xai"]),
model: z.string().min(1),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant", "tool"]),
content: z.string(),
name: z.string().optional(),
})
),
temperature: z.number().min(0).max(2).optional(),
maxTokens: z.number().int().positive().optional(),
});
const body = Body.parse(req.body);
// ensure chat exists if provided
if (body.chatId) {
const exists = await prisma.chat.findUnique({ where: { id: body.chatId }, select: { id: true } });
if (!exists) return app.httpErrors.notFound("chat not found");
}
// store user messages (anything not assistant) for DB fidelity
if (body.chatId) {
const toInsert = body.messages.filter((m) => m.role !== "assistant");
if (toInsert.length) {
await prisma.message.createMany({
data: toInsert.map((m) => ({ chatId: body.chatId!, role: m.role as any, content: m.content, name: m.name })),
});
}
}
const result = await runMultiplex(body);
return {
chatId: body.chatId ?? null,
...result,
};
});
// Streaming SSE endpoint.
app.post("/v1/chat-completions/stream", async (req, reply) => {
requireAdmin(req);
const Body = z.object({
chatId: z.string().optional(),
provider: z.enum(["openai", "anthropic", "xai"]),
model: z.string().min(1),
messages: z.array(
z.object({
role: z.enum(["system", "user", "assistant", "tool"]),
content: z.string(),
name: z.string().optional(),
})
),
temperature: z.number().min(0).max(2).optional(),
maxTokens: z.number().int().positive().optional(),
});
const body = Body.parse(req.body);
// ensure chat exists if provided
if (body.chatId) {
const exists = await prisma.chat.findUnique({ where: { id: body.chatId }, select: { id: true } });
if (!exists) return app.httpErrors.notFound("chat not found");
}
// store user messages (anything not assistant) for DB fidelity
if (body.chatId) {
const toInsert = body.messages.filter((m) => m.role !== "assistant");
if (toInsert.length) {
await prisma.message.createMany({
data: toInsert.map((m) => ({ chatId: body.chatId!, role: m.role as any, content: m.content, name: m.name })),
});
}
}
reply.raw.writeHead(200, {
"Content-Type": "text/event-stream; charset=utf-8",
"Cache-Control": "no-cache, no-transform",
Connection: "keep-alive",
});
const send = (event: string, data: any) => {
reply.raw.write(`event: ${event}\n`);
reply.raw.write(`data: ${JSON.stringify(data)}\n\n`);
};
for await (const ev of runMultiplexStream(body)) {
if (ev.type === "meta") send("meta", ev);
else if (ev.type === "delta") send("delta", ev);
else if (ev.type === "done") send("done", ev);
else if (ev.type === "error") send("error", ev);
}
reply.raw.end();
return reply;
});
}