feat: async task pipeline for media and llm workflows
This commit is contained in:
@@ -11,6 +11,7 @@ const parseBoolean = (value: string | undefined, fallback: boolean) => {
|
||||
|
||||
export const ENV = {
|
||||
appId: process.env.VITE_APP_ID ?? "",
|
||||
appPublicBaseUrl: process.env.APP_PUBLIC_BASE_URL ?? "",
|
||||
cookieSecret: process.env.JWT_SECRET ?? "",
|
||||
databaseUrl: process.env.DATABASE_URL ?? "",
|
||||
oAuthServerUrl: process.env.OAUTH_SERVER_URL ?? "",
|
||||
@@ -27,7 +28,22 @@ export const ENV = {
|
||||
llmApiKey:
|
||||
process.env.LLM_API_KEY ?? process.env.BUILT_IN_FORGE_API_KEY ?? "",
|
||||
llmModel: process.env.LLM_MODEL ?? "gemini-2.5-flash",
|
||||
llmVisionApiUrl:
|
||||
process.env.LLM_VISION_API_URL ??
|
||||
process.env.LLM_API_URL ??
|
||||
(process.env.BUILT_IN_FORGE_API_URL
|
||||
? `${process.env.BUILT_IN_FORGE_API_URL.replace(/\/$/, "")}/v1/chat/completions`
|
||||
: ""),
|
||||
llmVisionApiKey:
|
||||
process.env.LLM_VISION_API_KEY ??
|
||||
process.env.LLM_API_KEY ??
|
||||
process.env.BUILT_IN_FORGE_API_KEY ??
|
||||
"",
|
||||
llmVisionModel: process.env.LLM_VISION_MODEL ?? process.env.LLM_MODEL ?? "gemini-2.5-flash",
|
||||
llmMaxTokens: parseInteger(process.env.LLM_MAX_TOKENS, 32768),
|
||||
llmEnableThinking: parseBoolean(process.env.LLM_ENABLE_THINKING, false),
|
||||
llmThinkingBudget: parseInteger(process.env.LLM_THINKING_BUDGET, 128),
|
||||
mediaServiceUrl: process.env.MEDIA_SERVICE_URL ?? "",
|
||||
backgroundTaskPollMs: parseInteger(process.env.BACKGROUND_TASK_POLL_MS, 3000),
|
||||
backgroundTaskStaleMs: parseInteger(process.env.BACKGROUND_TASK_STALE_MS, 300000),
|
||||
};
|
||||
|
||||
@@ -68,6 +68,29 @@ describe("invokeLLM", () => {
|
||||
expect(JSON.parse(request.body)).not.toHaveProperty("thinking");
|
||||
});
|
||||
|
||||
it("allows overriding the model per request", async () => {
|
||||
process.env.LLM_API_URL = "https://one.hao.work/v1/chat/completions";
|
||||
process.env.LLM_API_KEY = "test-key";
|
||||
process.env.LLM_MODEL = "qwen3.5-plus";
|
||||
|
||||
const fetchMock = vi.fn().mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockSuccessResponse,
|
||||
});
|
||||
vi.stubGlobal("fetch", fetchMock);
|
||||
|
||||
const { invokeLLM } = await import("./llm");
|
||||
await invokeLLM({
|
||||
model: "qwen3-vl-235b-a22b",
|
||||
messages: [{ role: "user", content: "describe image" }],
|
||||
});
|
||||
|
||||
const [, request] = fetchMock.mock.calls[0] as [string, { body: string }];
|
||||
expect(JSON.parse(request.body)).toMatchObject({
|
||||
model: "qwen3-vl-235b-a22b",
|
||||
});
|
||||
});
|
||||
|
||||
it("falls back to legacy forge variables when LLM_* values are absent", async () => {
|
||||
delete process.env.LLM_API_URL;
|
||||
delete process.env.LLM_API_KEY;
|
||||
|
||||
@@ -57,6 +57,9 @@ export type ToolChoice =
|
||||
|
||||
export type InvokeParams = {
|
||||
messages: Message[];
|
||||
model?: string;
|
||||
apiUrl?: string;
|
||||
apiKey?: string;
|
||||
tools?: Tool[];
|
||||
toolChoice?: ToolChoice;
|
||||
tool_choice?: ToolChoice;
|
||||
@@ -209,13 +212,15 @@ const normalizeToolChoice = (
|
||||
return toolChoice;
|
||||
};
|
||||
|
||||
const resolveApiUrl = () =>
|
||||
ENV.llmApiUrl && ENV.llmApiUrl.trim().length > 0
|
||||
const resolveApiUrl = (apiUrl?: string) =>
|
||||
apiUrl && apiUrl.trim().length > 0
|
||||
? apiUrl
|
||||
: ENV.llmApiUrl && ENV.llmApiUrl.trim().length > 0
|
||||
? ENV.llmApiUrl
|
||||
: "https://forge.manus.im/v1/chat/completions";
|
||||
|
||||
const assertApiKey = () => {
|
||||
if (!ENV.llmApiKey) {
|
||||
const assertApiKey = (apiKey?: string) => {
|
||||
if (!(apiKey || ENV.llmApiKey)) {
|
||||
throw new Error("LLM_API_KEY is not configured");
|
||||
}
|
||||
};
|
||||
@@ -266,10 +271,13 @@ const normalizeResponseFormat = ({
|
||||
};
|
||||
|
||||
export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
|
||||
assertApiKey();
|
||||
assertApiKey(params.apiKey);
|
||||
|
||||
const {
|
||||
messages,
|
||||
model,
|
||||
apiUrl,
|
||||
apiKey,
|
||||
tools,
|
||||
toolChoice,
|
||||
tool_choice,
|
||||
@@ -280,7 +288,7 @@ export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
|
||||
} = params;
|
||||
|
||||
const payload: Record<string, unknown> = {
|
||||
model: ENV.llmModel,
|
||||
model: model || ENV.llmModel,
|
||||
messages: messages.map(normalizeMessage),
|
||||
};
|
||||
|
||||
@@ -315,11 +323,11 @@ export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
|
||||
payload.response_format = normalizedResponseFormat;
|
||||
}
|
||||
|
||||
const response = await fetch(resolveApiUrl(), {
|
||||
const response = await fetch(resolveApiUrl(apiUrl), {
|
||||
method: "POST",
|
||||
headers: {
|
||||
"content-type": "application/json",
|
||||
authorization: `Bearer ${ENV.llmApiKey}`,
|
||||
authorization: `Bearer ${apiKey || ENV.llmApiKey}`,
|
||||
},
|
||||
body: JSON.stringify(payload),
|
||||
});
|
||||
|
||||
@@ -6,7 +6,7 @@ export function serveStatic(app: Express) {
|
||||
const distPath =
|
||||
process.env.NODE_ENV === "development"
|
||||
? path.resolve(import.meta.dirname, "../..", "dist", "public")
|
||||
: path.resolve(import.meta.dirname, "public");
|
||||
: path.resolve(import.meta.dirname, "..", "public");
|
||||
if (!fs.existsSync(distPath)) {
|
||||
console.error(
|
||||
`Could not find the build directory: ${distPath}, make sure to build the client first`
|
||||
|
||||
Reference in a new issue
Block a user