// Feat: add env-driven LLM configuration and smoke test
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
const ORIGINAL_ENV = { ...process.env };
|
||||
|
||||
const mockSuccessResponse = {
|
||||
id: "chatcmpl-test",
|
||||
created: 1,
|
||||
model: "qwen3.5-plus",
|
||||
choices: [
|
||||
{
|
||||
index: 0,
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "你好,我是测试响应。",
|
||||
},
|
||||
finish_reason: "stop",
|
||||
},
|
||||
],
|
||||
};
|
||||
describe("invokeLLM", () => {
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
vi.restoreAllMocks();
|
||||
process.env = { ...ORIGINAL_ENV };
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = { ...ORIGINAL_ENV };
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
it("uses LLM_* environment variables for request config", async () => {
|
||||
process.env.LLM_API_URL = "https://one.hao.work/v1/chat/completions";
|
||||
process.env.LLM_API_KEY = "test-key";
|
||||
process.env.LLM_MODEL = "qwen3.5-plus";
|
||||
process.env.LLM_MAX_TOKENS = "4096";
|
||||
process.env.LLM_ENABLE_THINKING = "0";
|
||||
|
||||
const fetchMock = vi.fn().mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockSuccessResponse,
|
||||
});
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const { invokeLLM } = await import("./llm");
|
||||
await invokeLLM({
|
||||
messages: [{ role: "user", content: "你好" }],
|
||||
});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledTimes(1);
|
||||
expect(fetchMock).toHaveBeenCalledWith(
|
||||
"https://one.hao.work/v1/chat/completions",
|
||||
expect.objectContaining({
|
||||
method: "POST",
|
||||
headers: expect.objectContaining({
|
||||
authorization: "Bearer test-key",
|
||||
}),
|
||||
})
|
||||
);
|
||||
|
||||
const [, request] = fetchMock.mock.calls[0] as [string, { body: string }];
|
||||
expect(JSON.parse(request.body)).toMatchObject({
|
||||
model: "qwen3.5-plus",
|
||||
max_tokens: 4096,
|
||||
messages: [{ role: "user", content: "你好" }],
|
||||
});
|
||||
expect(JSON.parse(request.body)).not.toHaveProperty("thinking");
|
||||
});
|
||||
|
||||
it("falls back to legacy forge variables when LLM_* values are absent", async () => {
|
||||
delete process.env.LLM_API_URL;
|
||||
delete process.env.LLM_API_KEY;
|
||||
delete process.env.LLM_MODEL;
|
||||
delete process.env.LLM_MAX_TOKENS;
|
||||
delete process.env.LLM_ENABLE_THINKING;
|
||||
delete process.env.LLM_THINKING_BUDGET;
|
||||
process.env.BUILT_IN_FORGE_API_URL = "https://forge.example.com";
|
||||
process.env.BUILT_IN_FORGE_API_KEY = "legacy-key";
|
||||
|
||||
const fetchMock = vi.fn().mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockSuccessResponse,
|
||||
});
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const { invokeLLM } = await import("./llm");
|
||||
await invokeLLM({
|
||||
messages: [{ role: "user", content: "legacy" }],
|
||||
});
|
||||
|
||||
expect(fetchMock).toHaveBeenCalledWith(
|
||||
"https://forge.example.com/v1/chat/completions",
|
||||
expect.objectContaining({
|
||||
headers: expect.objectContaining({
|
||||
authorization: "Bearer legacy-key",
|
||||
}),
|
||||
})
|
||||
);
|
||||
|
||||
const [, request] = fetchMock.mock.calls[0] as [string, { body: string }];
|
||||
expect(JSON.parse(request.body)).toMatchObject({
|
||||
model: "gemini-2.5-flash",
|
||||
});
|
||||
});
|
||||
});