From c62109c0225f9b67bcb746f98bc2466a369fbf70 Mon Sep 17 00:00:00 2001
From: hailin
Date: Tue, 27 May 2025 14:42:52 +0800
Subject: [PATCH] Default embeddings provider to bge-m3; revert Ollama model
 fetch to an unauthenticated request

Widen the workspace embeddings provider union to include "bge-m3" and use
it as the fallback default. Revert fetchOllamaModels to a plain GET on
/api/tags, keeping the authenticated variant commented out for reference.
---
 .../app/[locale]/[workspaceid]/layout.tsx |  2 +-
 chatdesk-ui/lib/models/fetch-models.ts    | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/chatdesk-ui/app/[locale]/[workspaceid]/layout.tsx b/chatdesk-ui/app/[locale]/[workspaceid]/layout.tsx
index b61ed4e..7cf47f7 100644
--- a/chatdesk-ui/app/[locale]/[workspaceid]/layout.tsx
+++ b/chatdesk-ui/app/[locale]/[workspaceid]/layout.tsx
@@ -184,7 +184,7 @@ export default function WorkspaceLayout({ children }: WorkspaceLayoutProps) {
       includeWorkspaceInstructions:
         workspace?.include_workspace_instructions || true,
       embeddingsProvider:
-        (workspace?.embeddings_provider as "openai" | "local") || "openai"
+        (workspace?.embeddings_provider as "openai" | "local" | "bge-m3") || "bge-m3"
     })
 
     setLoading(false)
diff --git a/chatdesk-ui/lib/models/fetch-models.ts b/chatdesk-ui/lib/models/fetch-models.ts
index f08791e..f00ab46 100644
--- a/chatdesk-ui/lib/models/fetch-models.ts
+++ b/chatdesk-ui/lib/models/fetch-models.ts
@@ -54,20 +54,21 @@ export const fetchHostedModels = async (profile: Tables<"profiles">) => {
 
 export const fetchOllamaModels = async () => {
   try {
-    // const response = await fetch(
-    //   process.env.NEXT_PUBLIC_OLLAMA_URL + "/api/tags"
-    // )
-    const response = await fetch(
-      process.env.NEXT_PUBLIC_OLLAMA_URL + "/api/tags",
-      {
-        method: "GET",
-        headers: {
-          "Authorization": "Bearer token-abc123",
-          "Content-Type": "application/json"
-        }
-      }
-    )
+    const response = await fetch(
+      process.env.NEXT_PUBLIC_OLLAMA_URL + "/api/tags"
+    )
+
+    // const response = await fetch(
+    //   process.env.NEXT_PUBLIC_OLLAMA_URL + "/api/tags",
+    //   {
+    //     method: "GET",
+    //     headers: {
+    //       "Authorization": "Bearer token-abc123",
+    //       "Content-Type": "application/json"
+    //     }
+    //   }
+    // )
 
     if (!response.ok) {
       throw new Error(`LLM server is not responding.`)