fix streaming
server/tests/chat-tools-streaming.test.ts (new file, 107 lines)
@@ -0,0 +1,107 @@
import assert from "node:assert/strict";
import test from "node:test";
import {
  runToolAwareChatCompletionsStream,
  runToolAwareOpenAIChatStream,
  type ToolAwareStreamingEvent,
} from "../src/llm/chat-tools.js";

// Turns a fixed list of events into an async iterable, yielding each one on its own microtask.
async function* streamFrom(events: any[]) {
  for (const event of events) {
    await Promise.resolve();
    yield event;
  }
}

// Drains a tool-aware stream and collects every emitted event for assertions.
async function collectEvents(iterable: AsyncIterable<ToolAwareStreamingEvent>) {
  const events: ToolAwareStreamingEvent[] = [];
  for await (const event of iterable) {
    events.push(event);
  }
  return events;
}

test("OpenAI Responses stream emits text deltas as they arrive", async () => {
|
||||
const outputMessage = {
|
||||
id: "msg_1",
|
||||
type: "message",
|
||||
role: "assistant",
|
||||
status: "completed",
|
||||
content: [{ type: "output_text", text: "Hello" }],
|
||||
};
|
||||
const client = {
|
||||
responses: {
|
||||
create: async () =>
|
||||
streamFrom([
|
||||
{ type: "response.output_item.added", item: { ...outputMessage, content: [] }, output_index: 0 },
|
||||
{ type: "response.output_text.delta", delta: "Hel", output_index: 0, content_index: 0 },
|
||||
{ type: "response.output_text.delta", delta: "lo", output_index: 0, content_index: 0 },
|
||||
{ type: "response.output_item.done", item: outputMessage, output_index: 0 },
|
||||
{
|
||||
type: "response.completed",
|
||||
response: {
|
||||
status: "completed",
|
||||
output_text: "Hello",
|
||||
output: [outputMessage],
|
||||
usage: { input_tokens: 2, output_tokens: 1, total_tokens: 3 },
|
||||
},
|
||||
},
|
||||
]),
|
||||
},
|
||||
};
|
||||
|
||||
const events = await collectEvents(
|
||||
runToolAwareOpenAIChatStream({
|
||||
client: client as any,
|
||||
model: "gpt-test",
|
||||
messages: [{ role: "user", content: "Say hello" }],
|
||||
})
|
||||
);
|
||||
|
||||
assert.deepEqual(
|
||||
events.map((event) => event.type),
|
||||
["delta", "delta", "done"]
|
||||
);
|
||||
assert.deepEqual(
|
||||
events.filter((event) => event.type === "delta").map((event) => event.text),
|
||||
["Hel", "lo"]
|
||||
);
|
||||
assert.equal(events.at(-1)?.type === "done" ? events.at(-1)?.result.text : null, "Hello");
|
||||
});
|
||||
|
||||
test("OpenAI-compatible Chat Completions stream emits text deltas as they arrive", async () => {
|
||||
const client = {
|
||||
chat: {
|
||||
completions: {
|
||||
create: async () =>
|
||||
streamFrom([
|
||||
{ choices: [{ delta: { role: "assistant" } }] },
|
||||
{ choices: [{ delta: { content: "Hel" } }] },
|
||||
{ choices: [{ delta: { content: "lo" } }] },
|
||||
{
|
||||
choices: [{ delta: {}, finish_reason: "stop" }],
|
||||
usage: { prompt_tokens: 2, completion_tokens: 1, total_tokens: 3 },
|
||||
},
|
||||
]),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const events = await collectEvents(
|
||||
runToolAwareChatCompletionsStream({
|
||||
client: client as any,
|
||||
model: "grok-test",
|
||||
messages: [{ role: "user", content: "Say hello" }],
|
||||
})
|
||||
);
|
||||
|
||||
assert.deepEqual(
|
||||
events.map((event) => event.type),
|
||||
["delta", "delta", "done"]
|
||||
);
|
||||
assert.deepEqual(
|
||||
events.filter((event) => event.type === "delta").map((event) => event.text),
|
||||
["Hel", "lo"]
|
||||
);
|
||||
assert.equal(events.at(-1)?.type === "done" ? events.at(-1)?.result.text : null, "Hello");
|
||||
});
|
||||
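For context on what these tests exercise, a rough consumption sketch follows: a caller iterates the tool-aware stream and handles the "delta" and "done" events the assertions above check for. Only event.type, the delta text, and result.text are taken from this commit; the caller function, its name, and the write target (process.stdout) are illustrative assumptions, not part of the change.

import { runToolAwareOpenAIChatStream, type ToolAwareStreamingEvent } from "../src/llm/chat-tools.js";

// Hypothetical caller: print text deltas as they arrive, then report the final result.
async function printStreamedReply(client: any): Promise<void> {
  const stream = runToolAwareOpenAIChatStream({
    client,
    model: "gpt-test",
    messages: [{ role: "user", content: "Say hello" }],
  }) as AsyncIterable<ToolAwareStreamingEvent>;

  for await (const event of stream) {
    if (event.type === "delta") {
      process.stdout.write(event.text); // incremental text, e.g. "Hel" then "lo"
    } else if (event.type === "done") {
      console.log(`\nfull text: ${event.result.text}`); // "Hello" in the scenarios above
    }
  }
}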