feat: async task pipeline for media and llm workflows

这个提交包含在:
cryptocommuniums-afk
2026-03-15 00:12:26 +08:00
父节点 1cc863e60e
当前提交 20e183d2da
修改 36 个文件,包含 1961 行新增、339 行删除

查看文件

@@ -68,6 +68,29 @@ describe("invokeLLM", () => {
expect(JSON.parse(request.body)).not.toHaveProperty("thinking");
});
// A `model` passed in the call options must win over the LLM_MODEL env default.
it("allows overriding the model per request", async () => {
  process.env.LLM_API_URL = "https://one.hao.work/v1/chat/completions";
  process.env.LLM_API_KEY = "test-key";
  process.env.LLM_MODEL = "qwen3.5-plus";

  // Stub global fetch so we can inspect the outgoing request body.
  const mockedFetch = vi.fn().mockResolvedValue({
    ok: true,
    json: async () => mockSuccessResponse,
  });
  vi.stubGlobal("fetch", mockedFetch);

  // Dynamic import so the module picks up the env vars set above.
  const { invokeLLM } = await import("./llm");
  await invokeLLM({
    model: "qwen3-vl-235b-a22b",
    messages: [{ role: "user", content: "describe image" }],
  });

  // Second positional fetch argument is the RequestInit carrying the JSON body.
  const [, requestInit] = mockedFetch.mock.calls[0] as [string, { body: string }];
  const payload = JSON.parse(requestInit.body);
  expect(payload).toMatchObject({ model: "qwen3-vl-235b-a22b" });
});
it("falls back to legacy forge variables when LLM_* values are absent", async () => {
delete process.env.LLM_API_URL;
delete process.env.LLM_API_KEY;