Compare commits
3 Commits
70583c7ce7 ... e299dd4d23
| Author | SHA1 | Date |
|---|---|---|
| | e299dd4d23 | |
| | afd2336540 | |
| | 2a16dca469 | |
12 .gitignore vendored Normal file
@@ -0,0 +1,12 @@
node_modules
.env
.env.*
dev.db
*.db
prisma/migrations
.DS_Store
dist

# leftovers from early prisma v7 experiment
/generated
prisma.config.ts.bak
65 README.md
@@ -1,3 +1,66 @@
# llm-backend

LLM multiplexer + personal chat DB backend

Backend API for:
- LLM multiplexer (OpenAI / Anthropic / xAI (Grok))
- Personal chat database (chats/messages + LLM call log)

## Stack
- Node.js + TypeScript
- Fastify (HTTP)
- Prisma + SQLite (dev)

## Quick start

```bash
cd llm-backend
cp .env.example .env
npm run db:migrate
npm run dev
```

Open docs: `http://localhost:8787/docs`

## Auth

Set `ADMIN_TOKEN` and send:

`Authorization: Bearer <ADMIN_TOKEN>`

If `ADMIN_TOKEN` is not set, the server runs in open mode (dev).
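
For example, a minimal sketch of an authenticated request (assuming Node 18+ with global `fetch`, the default port 8787, and `ADMIN_TOKEN` exported in your shell):

```ts
// Hypothetical client snippet: list chats using the bearer token.
const res = await fetch("http://localhost:8787/v1/chats", {
  headers: { Authorization: `Bearer ${process.env.ADMIN_TOKEN}` },
});
if (!res.ok) throw new Error(`request failed: ${res.status}`);
const { chats } = await res.json();
console.log(chats);
```
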
## Env
- `OPENAI_API_KEY`
- `ANTHROPIC_API_KEY`
- `XAI_API_KEY`

## API
- `GET /health`
- `GET /v1/chats`
- `POST /v1/chats`
- `GET /v1/chats/:chatId`
- `POST /v1/chats/:chatId/messages`
- `POST /v1/chat-completions`
- `POST /v1/chat-completions/stream` (SSE)

`POST /v1/chat-completions` body example:

```json
{
  "chatId": "<optional chat id>",
  "provider": "openai",
  "model": "gpt-4.1-mini",
  "messages": [
    {"role":"system","content":"You are helpful."},
    {"role":"user","content":"Say hi"}
  ],
  "temperature": 0.2,
  "maxTokens": 256
}
```
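
A minimal sketch of consuming the SSE endpoint from Node (assuming Node 18+ with global `fetch`; the `meta`/`delta`/`done`/`error` event names match what `src/routes.ts` emits):

```ts
// Hypothetical SSE client for POST /v1/chat-completions/stream.
const res = await fetch("http://localhost:8787/v1/chat-completions/stream", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.ADMIN_TOKEN}`,
  },
  body: JSON.stringify({
    provider: "openai",
    model: "gpt-4.1-mini",
    messages: [{ role: "user", content: "Say hi" }],
  }),
});

// Parse the event stream by hand: frames are separated by a blank line,
// each with `event:` and `data:` lines (see the `send` helper in src/routes.ts).
const decoder = new TextDecoder();
let buffer = "";
for await (const chunk of res.body as any) {
  buffer += decoder.decode(chunk, { stream: true });
  let idx: number;
  while ((idx = buffer.indexOf("\n\n")) !== -1) {
    const frame = buffer.slice(0, idx);
    buffer = buffer.slice(idx + 2);
    const event = /^event: (.*)$/m.exec(frame)?.[1];
    const data = /^data: (.*)$/m.exec(frame)?.[1];
    if (event === "delta" && data) process.stdout.write(JSON.parse(data).text);
    if (event === "done") process.stdout.write("\n");
  }
}
```
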
## Next steps (planned)
- Better streaming protocol compatibility (OpenAI-style chunks + cancellation)
- Tool/function calling normalization
- User accounts + per-device API keys
- Postgres support + migrations for prod
- Attachments + embeddings + semantic search
2327 package-lock.json generated Normal file
File diff suppressed because it is too large
34 package.json Normal file
@@ -0,0 +1,34 @@
{
  "name": "llm-backend",
  "version": "0.1.0",
  "private": true,
  "type": "module",
  "description": "LLM multiplexer + personal chat database backend",
  "scripts": {
    "dev": "tsx watch src/index.ts",
    "start": "node dist/index.js",
    "build": "tsc -p tsconfig.json",
    "prisma:generate": "prisma generate",
    "db:migrate": "prisma migrate dev",
    "db:studio": "prisma studio"
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.71.2",
    "@fastify/cors": "^11.2.0",
    "@fastify/sensible": "^6.0.4",
    "@fastify/swagger": "^9.6.1",
    "@fastify/swagger-ui": "^5.2.5",
    "@prisma/client": "^6.16.1",
    "dotenv": "^17.2.3",
    "fastify": "^5.7.2",
    "openai": "^6.16.0",
    "pino-pretty": "^13.1.3",
    "zod": "^4.3.6"
  },
  "devDependencies": {
    "@types/node": "^25.0.10",
    "prisma": "^6.16.1",
    "tsx": "^4.21.0",
    "typescript": "^5.9.3"
  }
}
94 prisma/schema.prisma Normal file
@@ -0,0 +1,94 @@
// Prisma schema for the personal chat DB + LLM call log

generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "sqlite"
  url      = env("DATABASE_URL")
}

enum Provider {
  openai
  anthropic
  xai
}

enum MessageRole {
  system
  user
  assistant
  tool
}

model User {
  id        String   @id @default(cuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  // minimal for now (single-user is fine). Keep extensible.
  handle String? @unique

  chats Chat[]
}

model Chat {
  id        String   @id @default(cuid())
  createdAt DateTime @default(now())
  updatedAt DateTime @updatedAt

  title String?

  user   User?   @relation(fields: [userId], references: [id])
  userId String?

  messages Message[]
  calls    LlmCall[]

  @@index([userId])
}

model Message {
  id        String   @id @default(cuid())
  createdAt DateTime @default(now())

  chat   Chat   @relation(fields: [chatId], references: [id], onDelete: Cascade)
  chatId String

  role    MessageRole
  content String

  // for tool messages or attachments later
  name     String?
  metadata Json?

  @@index([chatId, createdAt])
}

model LlmCall {
  id        String   @id @default(cuid())
  createdAt DateTime @default(now())

  chat   Chat   @relation(fields: [chatId], references: [id], onDelete: Cascade)
  chatId String

  provider Provider
  model    String

  // request/response snapshots for debugging + replay
  request  Json
  response Json?

  // usage/cost basics
  inputTokens  Int?
  outputTokens Int?
  totalTokens  Int?

  latencyMs Int?

  error String?

  @@index([chatId, createdAt])
  @@index([provider, model, createdAt])
}
22 src/auth.ts Normal file
@@ -0,0 +1,22 @@
import type { FastifyRequest } from "fastify";
import { env } from "./env.js";

export function requireAdmin(req: FastifyRequest) {
  // If ADMIN_TOKEN isn't set, run in "open" mode (dev).
  if (!env.ADMIN_TOKEN) return;

  const auth = req.headers.authorization;
  if (!auth?.startsWith("Bearer ")) {
    const err = new Error("missing bearer token");
    // @ts-expect-error attach status
    err.statusCode = 401;
    throw err;
  }
  const token = auth.slice("Bearer ".length);
  if (token !== env.ADMIN_TOKEN) {
    const err = new Error("invalid bearer token");
    // @ts-expect-error attach status
    err.statusCode = 403;
    throw err;
  }
}
16 src/db.ts Normal file
@@ -0,0 +1,16 @@
import { PrismaClient } from "@prisma/client";

export const prisma = new PrismaClient({
  log: [
    { emit: "event", level: "error" },
    { emit: "event", level: "warn" },
  ],
});

prisma.$on("error", (e: any) => {
  console.error("[prisma:error]", e);
});

prisma.$on("warn", (e: any) => {
  console.warn("[prisma:warn]", e);
});
19 src/env.ts Normal file
@@ -0,0 +1,19 @@
import { z } from "zod";
import "dotenv/config";

const EnvSchema = z.object({
  PORT: z.coerce.number().int().positive().default(8787),
  HOST: z.string().default("0.0.0.0"),

  // simple bearer-token auth for your personal backend
  ADMIN_TOKEN: z.string().min(20).optional(),

  // provider keys
  OPENAI_API_KEY: z.string().optional(),
  ANTHROPIC_API_KEY: z.string().optional(),
  XAI_API_KEY: z.string().optional(),
});

export type Env = z.infer<typeof EnvSchema>;

export const env: Env = EnvSchema.parse(process.env);
45 src/index.ts Normal file
@@ -0,0 +1,45 @@
import Fastify from "fastify";
import cors from "@fastify/cors";
import swagger from "@fastify/swagger";
import swaggerUI from "@fastify/swagger-ui";
import sensible from "@fastify/sensible";
import { env } from "./env.js";
import { registerRoutes } from "./routes.js";

const app = Fastify({
  logger: {
    transport: {
      target: "pino-pretty",
      options: { colorize: true, translateTime: "SYS:standard" },
    },
  },
});

await app.register(cors, { origin: true, credentials: true });

await app.register(swagger, {
  openapi: {
    info: {
      title: "LLM Multiplexer Backend",
      version: "0.1.0",
    },
  },
});

await app.register(swaggerUI, { routePrefix: "/docs" });
await app.register(sensible);

app.setErrorHandler((err, _req, reply) => {
  const e = err as any;
  const statusCode = e.statusCode ?? 500;
  reply.status(statusCode).send({
    error: true,
    message: e.message ?? String(e),
    statusCode,
  });
});

await registerRoutes(app);

await app.listen({ port: env.PORT, host: env.HOST });
app.log.info(`listening on http://${env.HOST}:${env.PORT}`);
124 src/llm/multiplexer.ts Normal file
@@ -0,0 +1,124 @@
import { performance } from "node:perf_hooks";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, MultiplexResponse, Provider } from "./types.js";

function asProviderEnum(p: Provider) {
  // Prisma enum values match these strings.
  return p;
}

export async function runMultiplex(req: MultiplexRequest): Promise<MultiplexResponse> {
  const t0 = performance.now();

  // Persist call record early so we can attach errors.
  const call = await prisma.llmCall.create({
    data: {
      chatId: req.chatId ?? (await prisma.chat.create({ data: {} })).id,
      provider: asProviderEnum(req.provider) as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true, chatId: true },
  });

  try {
    let outText = "";
    let usage: MultiplexResponse["usage"] | undefined;
    let raw: unknown;

    if (req.provider === "openai" || req.provider === "xai") {
      const client = req.provider === "openai" ? openaiClient() : xaiClient();
      const r = await client.chat.completions.create({
        model: req.model,
        // OpenAI SDK has very specific message union types; our normalized schema is compatible.
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
      });
      raw = r;
      outText = r.choices?.[0]?.message?.content ?? "";
      usage = r.usage
        ? {
            inputTokens: r.usage.prompt_tokens,
            outputTokens: r.usage.completion_tokens,
            totalTokens: r.usage.total_tokens,
          }
        : undefined;
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();

      // Anthropic splits system prompt. We'll convert first system message into system string.
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));

      const r = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
      });
      raw = r;
      outText = r.content
        .map((c: any) => (c.type === "text" ? c.text : ""))
        .join("")
        .trim();

      // Anthropic usage (SDK typing varies by version)
      const ru: any = (r as any).usage;
      if (ru) {
        usage = {
          inputTokens: ru.input_tokens,
          outputTokens: ru.output_tokens,
          totalTokens: (ru.input_tokens ?? 0) + (ru.output_tokens ?? 0),
        };
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }

    const latencyMs = Math.round(performance.now() - t0);

    // Store assistant message + call record
    await prisma.$transaction([
      prisma.message.create({
        data: {
          chatId: call.chatId,
          role: "assistant" as any,
          content: outText,
        },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);

    return {
      provider: req.provider,
      model: req.model,
      message: { role: "assistant", content: outText },
      usage,
      raw,
    };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    throw e;
  }
}
19 src/llm/providers.ts Normal file
@@ -0,0 +1,19 @@
import OpenAI from "openai";
import Anthropic from "@anthropic-ai/sdk";
import { env } from "../env.js";

export function openaiClient() {
  if (!env.OPENAI_API_KEY) throw new Error("OPENAI_API_KEY not set");
  return new OpenAI({ apiKey: env.OPENAI_API_KEY });
}

// xAI (Grok) is OpenAI-compatible at https://api.x.ai/v1
export function xaiClient() {
  if (!env.XAI_API_KEY) throw new Error("XAI_API_KEY not set");
  return new OpenAI({ apiKey: env.XAI_API_KEY, baseURL: "https://api.x.ai/v1" });
}

export function anthropicClient() {
  if (!env.ANTHROPIC_API_KEY) throw new Error("ANTHROPIC_API_KEY not set");
  return new Anthropic({ apiKey: env.ANTHROPIC_API_KEY });
}
130 src/llm/streaming.ts Normal file
@@ -0,0 +1,130 @@
import { performance } from "node:perf_hooks";
import type OpenAI from "openai";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, Provider } from "./types.js";

export type StreamEvent =
  | { type: "meta"; chatId: string; callId: string; provider: Provider; model: string }
  | { type: "delta"; text: string }
  | { type: "done"; text: string; usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number } }
  | { type: "error"; message: string };

function getChatIdOrCreate(chatId?: string) {
  if (chatId) return Promise.resolve(chatId);
  return prisma.chat.create({ data: {}, select: { id: true } }).then((c) => c.id);
}

export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator<StreamEvent> {
  const t0 = performance.now();
  const chatId = await getChatIdOrCreate(req.chatId);

  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: req.provider as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true },
  });

  yield { type: "meta", chatId, callId: call.id, provider: req.provider, model: req.model };

  let text = "";
  let usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number } | undefined;
  let raw: unknown = { streamed: true };

  try {
    if (req.provider === "openai" || req.provider === "xai") {
      const client = req.provider === "openai" ? openaiClient() : xaiClient();

      const stream = await client.chat.completions.create({
        model: req.model,
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
        stream: true,
      });

      for await (const chunk of stream as any as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>) {
        const delta = chunk.choices?.[0]?.delta?.content ?? "";
        if (delta) {
          text += delta;
          yield { type: "delta", text: delta };
        }
      }

      // no guaranteed usage in stream mode across providers; leave empty for now
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();

      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));

      const stream = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
        stream: true,
      });

      for await (const ev of stream as any as AsyncIterable<any>) {
        // Anthropic streaming events include content_block_delta with text_delta
        if (ev?.type === "content_block_delta" && ev?.delta?.type === "text_delta") {
          const delta = ev.delta.text ?? "";
          if (delta) {
            text += delta;
            yield { type: "delta", text: delta };
          }
        }
        // capture usage if present on message_delta
        if (ev?.type === "message_delta" && ev?.usage) {
          usage = {
            inputTokens: ev.usage.input_tokens,
            outputTokens: ev.usage.output_tokens,
            totalTokens: (ev.usage.input_tokens ?? 0) + (ev.usage.output_tokens ?? 0),
          };
        }
        // some streams end with message_stop
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }

    const latencyMs = Math.round(performance.now() - t0);

    await prisma.$transaction([
      prisma.message.create({
        data: { chatId, role: "assistant" as any, content: text },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);

    yield { type: "done", text, usage };
  } catch (e: any) {
    const latencyMs = Math.round(performance.now() - t0);
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    yield { type: "error", message: e?.message ?? String(e) };
  }
}
28 src/llm/types.ts Normal file
@@ -0,0 +1,28 @@
export type Provider = "openai" | "anthropic" | "xai";

export type ChatMessage = {
  role: "system" | "user" | "assistant" | "tool";
  content: string;
  name?: string;
};

export type MultiplexRequest = {
  chatId?: string;
  provider: Provider;
  model: string;
  messages: ChatMessage[];
  temperature?: number;
  maxTokens?: number;
};

export type MultiplexResponse = {
  provider: Provider;
  model: string;
  message: { role: "assistant"; content: string };
  usage?: {
    inputTokens?: number;
    outputTokens?: number;
    totalTokens?: number;
  };
  raw: unknown;
};
171 src/routes.ts Normal file
@@ -0,0 +1,171 @@
import { z } from "zod";
import type { FastifyInstance } from "fastify";
import { prisma } from "./db.js";
import { requireAdmin } from "./auth.js";
import { runMultiplex } from "./llm/multiplexer.js";
import { runMultiplexStream } from "./llm/streaming.js";

export async function registerRoutes(app: FastifyInstance) {
  app.get("/health", async () => ({ ok: true }));

  app.get("/v1/chats", async (req) => {
    requireAdmin(req);
    const chats = await prisma.chat.findMany({
      orderBy: { updatedAt: "desc" },
      take: 100,
      select: { id: true, title: true, createdAt: true, updatedAt: true },
    });
    return { chats };
  });

  app.post("/v1/chats", async (req) => {
    requireAdmin(req);
    const Body = z.object({ title: z.string().optional() });
    const body = Body.parse(req.body ?? {});
    const chat = await prisma.chat.create({ data: { title: body.title } });
    return { chat };
  });

  app.get("/v1/chats/:chatId", async (req) => {
    requireAdmin(req);
    const Params = z.object({ chatId: z.string() });
    const { chatId } = Params.parse(req.params);

    const chat = await prisma.chat.findUnique({
      where: { id: chatId },
      include: { messages: { orderBy: { createdAt: "asc" } }, calls: { orderBy: { createdAt: "desc" } } },
    });
    if (!chat) return app.httpErrors.notFound("chat not found");
    return { chat };
  });

  app.post("/v1/chats/:chatId/messages", async (req) => {
    requireAdmin(req);
    const Params = z.object({ chatId: z.string() });
    const Body = z.object({
      role: z.enum(["system", "user", "assistant", "tool"]),
      content: z.string(),
      name: z.string().optional(),
      metadata: z.unknown().optional(),
    });

    const { chatId } = Params.parse(req.params);
    const body = Body.parse(req.body);

    const msg = await prisma.message.create({
      data: {
        chatId,
        role: body.role as any,
        content: body.content,
        name: body.name,
        metadata: body.metadata as any,
      },
    });

    return { message: msg };
  });

  // Main: create a completion via provider+model and store everything.
  app.post("/v1/chat-completions", async (req) => {
    requireAdmin(req);

    const Body = z.object({
      chatId: z.string().optional(),
      provider: z.enum(["openai", "anthropic", "xai"]),
      model: z.string().min(1),
      messages: z.array(
        z.object({
          role: z.enum(["system", "user", "assistant", "tool"]),
          content: z.string(),
          name: z.string().optional(),
        })
      ),
      temperature: z.number().min(0).max(2).optional(),
      maxTokens: z.number().int().positive().optional(),
    });

    const body = Body.parse(req.body);

    // ensure chat exists if provided
    if (body.chatId) {
      const exists = await prisma.chat.findUnique({ where: { id: body.chatId }, select: { id: true } });
      if (!exists) return app.httpErrors.notFound("chat not found");
    }

    // store user messages (anything not assistant) for DB fidelity
    if (body.chatId) {
      const toInsert = body.messages.filter((m) => m.role !== "assistant");
      if (toInsert.length) {
        await prisma.message.createMany({
          data: toInsert.map((m) => ({ chatId: body.chatId!, role: m.role as any, content: m.content, name: m.name })),
        });
      }
    }

    const result = await runMultiplex(body);

    return {
      chatId: body.chatId ?? null,
      ...result,
    };
  });

  // Streaming SSE endpoint.
  app.post("/v1/chat-completions/stream", async (req, reply) => {
    requireAdmin(req);

    const Body = z.object({
      chatId: z.string().optional(),
      provider: z.enum(["openai", "anthropic", "xai"]),
      model: z.string().min(1),
      messages: z.array(
        z.object({
          role: z.enum(["system", "user", "assistant", "tool"]),
          content: z.string(),
          name: z.string().optional(),
        })
      ),
      temperature: z.number().min(0).max(2).optional(),
      maxTokens: z.number().int().positive().optional(),
    });

    const body = Body.parse(req.body);

    // ensure chat exists if provided
    if (body.chatId) {
      const exists = await prisma.chat.findUnique({ where: { id: body.chatId }, select: { id: true } });
      if (!exists) return app.httpErrors.notFound("chat not found");
    }

    // store user messages (anything not assistant) for DB fidelity
    if (body.chatId) {
      const toInsert = body.messages.filter((m) => m.role !== "assistant");
      if (toInsert.length) {
        await prisma.message.createMany({
          data: toInsert.map((m) => ({ chatId: body.chatId!, role: m.role as any, content: m.content, name: m.name })),
        });
      }
    }

    reply.raw.writeHead(200, {
      "Content-Type": "text/event-stream; charset=utf-8",
      "Cache-Control": "no-cache, no-transform",
      Connection: "keep-alive",
    });

    const send = (event: string, data: any) => {
      reply.raw.write(`event: ${event}\n`);
      reply.raw.write(`data: ${JSON.stringify(data)}\n\n`);
    };

    for await (const ev of runMultiplexStream(body)) {
      if (ev.type === "meta") send("meta", ev);
      else if (ev.type === "delta") send("delta", ev);
      else if (ev.type === "done") send("done", ev);
      else if (ev.type === "error") send("error", ev);
    }

    reply.raw.end();
    return reply;
  });
}
15 tsconfig.json Normal file
@@ -0,0 +1,15 @@
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "lib": ["ES2022"],
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "outDir": "dist",
    "rootDir": "src"
  },
  "include": ["src/**/*"]
}