initial commit: add server
server/src/llm/streaming.ts (new file, 130 lines)
@@ -0,0 +1,130 @@
import { performance } from "node:perf_hooks";
import type OpenAI from "openai";
import { prisma } from "../db.js";
import { anthropicClient, openaiClient, xaiClient } from "./providers.js";
import type { MultiplexRequest, Provider } from "./types.js";

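// Events emitted by runMultiplexStream, in order: one "meta" frame identifying
// the chat and call, zero or more "delta" frames of incremental text, then either
// "done" (full text plus usage when the provider reports it) or "error".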
export type StreamEvent =
  | { type: "meta"; chatId: string; callId: string; provider: Provider; model: string }
  | { type: "delta"; text: string }
  | { type: "done"; text: string; usage?: { inputTokens?: number; outputTokens?: number; totalTokens?: number } }
  | { type: "error"; message: string };

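// Reuse the caller's chat when an id is supplied; otherwise lazily create an
// empty Chat row so every call is attached to a persisted conversation.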
function getChatIdOrCreate(chatId?: string) {
  if (chatId) return Promise.resolve(chatId);
  return prisma.chat.create({ data: {}, select: { id: true } }).then((c) => c.id);
}

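// Streams one completion from the requested provider, recording the call up front
// and persisting the assistant message plus latency/usage once the stream finishes.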
export async function* runMultiplexStream(req: MultiplexRequest): AsyncGenerator<StreamEvent> {
  const t0 = performance.now();
  const chatId = await getChatIdOrCreate(req.chatId);

  const call = await prisma.llmCall.create({
    data: {
      chatId,
      provider: req.provider as any,
      model: req.model,
      request: req as any,
    },
    select: { id: true },
  });

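  // Emit "meta" before any tokens so the consumer knows the chatId/callId
  // (useful for correlating logs) even if the provider call later fails.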
  yield { type: "meta", chatId, callId: call.id, provider: req.provider, model: req.model };

  let text = "";
  let usage: { inputTokens?: number; outputTokens?: number; totalTokens?: number } | undefined;
  let raw: unknown = { streamed: true };

  try {
    if (req.provider === "openai" || req.provider === "xai") {
      const client = req.provider === "openai" ? openaiClient() : xaiClient();

      const stream = await client.chat.completions.create({
        model: req.model,
        messages: req.messages.map((m) => ({ role: m.role, content: m.content, name: m.name })) as any,
        temperature: req.temperature,
        max_tokens: req.maxTokens,
        stream: true,
      });

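      // Forward each text delta as soon as it arrives, accumulating the full
      // reply locally so it can be persisted once the stream completes.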
      for await (const chunk of stream as any as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>) {
        const delta = chunk.choices?.[0]?.delta?.content ?? "";
        if (delta) {
          text += delta;
          yield { type: "delta", text: delta };
        }
      }

      // no guaranteed usage in stream mode across providers; leave empty for now
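      // (A possible follow-up: the OpenAI SDK accepts stream_options: { include_usage: true }
      // to append a final usage chunk, but that is not wired up in this version.)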
    } else if (req.provider === "anthropic") {
      const client = anthropicClient();

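      // Anthropic takes the system prompt as a top-level field, so split it out
      // and map the remaining messages to user/assistant turns.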
      const system = req.messages.find((m) => m.role === "system")?.content;
      const msgs = req.messages
        .filter((m) => m.role !== "system")
        .map((m) => ({ role: m.role === "assistant" ? "assistant" : "user", content: m.content }));

      const stream = await client.messages.create({
        model: req.model,
        system,
        max_tokens: req.maxTokens ?? 1024,
        temperature: req.temperature,
        messages: msgs as any,
        stream: true,
      });

      for await (const ev of stream as any as AsyncIterable<any>) {
        // Anthropic streaming events include content_block_delta with text_delta
        if (ev?.type === "content_block_delta" && ev?.delta?.type === "text_delta") {
          const delta = ev.delta.text ?? "";
          if (delta) {
            text += delta;
            yield { type: "delta", text: delta };
          }
        }
        // capture usage if present on message_delta
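        // (Note: Anthropic usually reports input_tokens on message_start and only
        // output_tokens on message_delta, so inputTokens may remain undefined here.)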
        if (ev?.type === "message_delta" && ev?.usage) {
          usage = {
            inputTokens: ev.usage.input_tokens,
            outputTokens: ev.usage.output_tokens,
            totalTokens:
              (ev.usage.input_tokens ?? 0) + (ev.usage.output_tokens ?? 0),
          };
        }
        // some streams end with message_stop
      }
    } else {
      throw new Error(`unknown provider: ${req.provider}`);
    }

    const latencyMs = Math.round(performance.now() - t0);

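    // Persist the assistant message and the call's response/latency/usage atomically,
    // so a partial failure cannot leave the chat and call records out of sync.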
    await prisma.$transaction([
      prisma.message.create({
        data: { chatId, role: "assistant" as any, content: text },
      }),
      prisma.llmCall.update({
        where: { id: call.id },
        data: {
          response: raw as any,
          latencyMs,
          inputTokens: usage?.inputTokens,
          outputTokens: usage?.outputTokens,
          totalTokens: usage?.totalTokens,
        },
      }),
    ]);

    yield { type: "done", text, usage };
  } catch (e: any) {
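    // Record the failure on the call row and surface it as an "error" event rather
    // than throwing, so the consumer's event stream always terminates cleanly.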
    const latencyMs = Math.round(performance.now() - t0);
    await prisma.llmCall.update({
      where: { id: call.id },
      data: {
        error: e?.message ?? String(e),
        latencyMs,
      },
    });
    yield { type: "error", message: e?.message ?? String(e) };
  }
}