import { ChatbotUIContext } from "@/context/context"
import { getAssistantCollectionsByAssistantId } from "@/db/assistant-collections"
import { getAssistantFilesByAssistantId } from "@/db/assistant-files"
import { getAssistantToolsByAssistantId } from "@/db/assistant-tools"
import { updateChat } from "@/db/chats"
import { getCollectionFilesByCollectionId } from "@/db/collection-files"
import { deleteMessagesIncludingAndAfter } from "@/db/messages"
import { buildFinalMessages } from "@/lib/build-prompt"
import { Tables } from "@/supabase/types"
import { ChatMessage, ChatPayload, LLMID, ModelProvider } from "@/types"
import { usePathname, useRouter } from "next/navigation"
import { useContext, useEffect, useRef } from "react"
import { LLM_LIST } from "../../../lib/models/llm/llm-list"

import i18nConfig from "@/i18nConfig"

import {
  createTempMessages,
  handleCreateChat,
  handleCreateMessages,
  handleHostedChat,
  handleLocalChat,
  handleRetrieval,
  processResponse,
  validateChatSettings
} from "../chat-helpers"
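
/**
 * Central chat handler hook for the chat UI. Exposes the chat input ref plus
 * handlers for starting a new chat and for sending, editing, regenerating,
 * and stopping messages.
 *
 * Minimal usage sketch (the consuming component is hypothetical, not part of
 * this file):
 *
 * ```tsx
 * const { chatInputRef, handleSendMessage, handleStopMessage } = useChatHandler()
 * // <textarea ref={chatInputRef} ... />
 * // onSubmit: handleSendMessage(userInput, chatMessages, false)
 * ```
 */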
export const useChatHandler = () => {
  const pathname = usePathname() // current route path
  const router = useRouter()

  // Extract the locale segment from the current path (defaults to "en")
  const locale = pathname.split("/")[1] || "en"

  const {
    userInput,
    chatFiles,
    setUserInput,
    setNewMessageImages,
    profile,
    setIsGenerating,
    setChatMessages,
    setFirstTokenReceived,
    selectedChat,
    selectedWorkspace,
    setSelectedChat,
    setChats,
    setSelectedTools,
    availableLocalModels,
    availableOpenRouterModels,
    abortController,
    setAbortController,
    chatSettings,
    newMessageImages,
    selectedAssistant,
    chatMessages,
    chatImages,
    setChatImages,
    setChatFiles,
    setNewMessageFiles,
    setShowFilesDisplay,
    newMessageFiles,
    chatFileItems,
    setChatFileItems,
    setToolInUse,
    useRetrieval,
    sourceCount,
    setIsPromptPickerOpen,
    setIsFilePickerOpen,
    selectedTools,
    selectedPreset,
    setChatSettings,
    models,
    isPromptPickerOpen,
    isFilePickerOpen,
    isToolPickerOpen
  } = useContext(ChatbotUIContext)

  const chatInputRef = useRef<HTMLTextAreaElement>(null)
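
  // Refocus the chat input whenever a picker opens or closes
  // (the condition is false only when all three pickers are open).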
  useEffect(() => {
    if (!isPromptPickerOpen || !isFilePickerOpen || !isToolPickerOpen) {
      chatInputRef.current?.focus()
    }
  }, [isPromptPickerOpen, isFilePickerOpen, isToolPickerOpen])
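
  // Resets all per-chat UI state, applies settings from the selected
  // assistant or preset, then navigates back to the workspace chat page.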
  const handleNewChat = async () => {
    if (!selectedWorkspace) return

    setUserInput("")
    setChatMessages([])
    setSelectedChat(null)
    setChatFileItems([])

    setIsGenerating(false)
    setFirstTokenReceived(false)

    setChatFiles([])
    setChatImages([])
    setNewMessageFiles([])
    setNewMessageImages([])
    setShowFilesDisplay(false)
    setIsPromptPickerOpen(false)
    setIsFilePickerOpen(false)

    setSelectedTools([])
    setToolInUse("none")
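
    // Apply the assistant's chat settings and preload its files,
    // collections, and tools; otherwise fall back to the selected preset.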
    if (selectedAssistant) {
      setChatSettings({
        model: selectedAssistant.model as LLMID,
        prompt: selectedAssistant.prompt,
        temperature: selectedAssistant.temperature,
        contextLength: selectedAssistant.context_length,
        includeProfileContext: selectedAssistant.include_profile_context,
        includeWorkspaceInstructions:
          selectedAssistant.include_workspace_instructions,
        embeddingsProvider: selectedAssistant.embeddings_provider as
          | "openai"
          | "local"
      })

      let allFiles = []

      const assistantFiles = (
        await getAssistantFilesByAssistantId(selectedAssistant.id)
      ).files
      allFiles = [...assistantFiles]
      const assistantCollections = (
        await getAssistantCollectionsByAssistantId(selectedAssistant.id)
      ).collections
      for (const collection of assistantCollections) {
        const collectionFiles = (
          await getCollectionFilesByCollectionId(collection.id)
        ).files
        allFiles = [...allFiles, ...collectionFiles]
      }
      const assistantTools = (
        await getAssistantToolsByAssistantId(selectedAssistant.id)
      ).tools

      setSelectedTools(assistantTools)
      setChatFiles(
        allFiles.map(file => ({
          id: file.id,
          name: file.name,
          type: file.type,
          file: null
        }))
      )

      if (allFiles.length > 0) setShowFilesDisplay(true)
    } else if (selectedPreset) {
      setChatSettings({
        model: selectedPreset.model as LLMID,
        prompt: selectedPreset.prompt,
        temperature: selectedPreset.temperature,
        contextLength: selectedPreset.context_length,
        includeProfileContext: selectedPreset.include_profile_context,
        includeWorkspaceInstructions:
          selectedPreset.include_workspace_instructions,
        embeddingsProvider: selectedPreset.embeddings_provider as
          | "openai"
          | "local"
      })
    } else if (selectedWorkspace) {
      // setChatSettings({
      //   model: (selectedWorkspace.default_model ||
      //     "gpt-4-1106-preview") as LLMID,
      //   prompt:
      //     selectedWorkspace.default_prompt ||
      //     "You are a friendly, helpful AI assistant.",
      //   temperature: selectedWorkspace.default_temperature || 0.5,
      //   contextLength: selectedWorkspace.default_context_length || 4096,
      //   includeProfileContext:
      //     selectedWorkspace.include_profile_context || true,
      //   includeWorkspaceInstructions:
      //     selectedWorkspace.include_workspace_instructions || true,
      //   embeddingsProvider:
      //     (selectedWorkspace.embeddings_provider as "openai" | "local") ||
      //     "openai"
      // })
    }
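
    // Derive the locale-prefixed home path from the current URL so the
    // redirect keeps the active locale (the default locale has no prefix).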
    const pathSegments = pathname.split("/").filter(Boolean)
    const locales = i18nConfig.locales
    const defaultLocale = i18nConfig.defaultLocale

    let locale: (typeof locales)[number] = defaultLocale

    const segment = pathSegments[0] as (typeof locales)[number]

    if (locales.includes(segment)) {
      locale = segment
    }
    const homePath = locale === defaultLocale ? "" : `/${locale}`

    return router.push(`${homePath}/${selectedWorkspace.id}/chat`)

    // return router.push(`/${locale}/${selectedWorkspace.id}/chat`)
  }
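
  // Moves keyboard focus back to the chat input textarea.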
  const handleFocusChatInput = () => {
    chatInputRef.current?.focus()
  }
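
  // Aborts the in-flight generation request, if any.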
  const handleStopMessage = () => {
    if (abortController) {
      abortController.abort()
    }
  }
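
  // Sends a message (or regenerates the last response): resolves the model,
  // optionally runs retrieval, streams the completion, then persists the
  // chat and messages. On failure the user's input is restored.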
  const handleSendMessage = async (
    messageContent: string,
    chatMessages: ChatMessage[],
    isRegeneration: boolean
  ) => {
    const startingInput = messageContent

    try {
      setUserInput("")
      setIsGenerating(true)
      setIsPromptPickerOpen(false)
      setIsFilePickerOpen(false)
      setNewMessageImages([])

      const newAbortController = new AbortController()
      setAbortController(newAbortController)
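
      // Look up the active model's metadata across custom, built-in,
      // local (Ollama), and OpenRouter model lists.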
      const modelData = [
        ...models.map(model => ({
          modelId: model.model_id as LLMID,
          modelName: model.name,
          provider: "custom" as ModelProvider,
          hostedId: model.id,
          platformLink: "",
          imageInput: false
        })),
        ...LLM_LIST,
        ...availableLocalModels,
        ...availableOpenRouterModels
      ].find(llm => llm.modelId === chatSettings?.model)

      validateChatSettings(
        chatSettings,
        modelData,
        profile,
        selectedWorkspace,
        messageContent
      )

      let currentChat = selectedChat ? { ...selectedChat } : null

      const b64Images = newMessageImages.map(image => image.base64)

      let retrievedFileItems: Tables<"file_items">[] = []
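
      // When files are attached and retrieval is enabled, run retrieval over
      // them to collect the most relevant file items (sourceCount controls
      // how many) for grounding the response.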
      if (
        (newMessageFiles.length > 0 || chatFiles.length > 0) &&
        useRetrieval
      ) {
        setToolInUse("retrieval")

        retrievedFileItems = await handleRetrieval(
          userInput,
          newMessageFiles,
          chatFiles,
          chatSettings!.embeddingsProvider,
          sourceCount
        )
      }
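
      // Optimistically add temporary user/assistant messages so the UI
      // updates before the server responds.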
      const { tempUserChatMessage, tempAssistantChatMessage } =
        createTempMessages(
          messageContent,
          chatMessages,
          chatSettings!,
          b64Images,
          isRegeneration,
          setChatMessages,
          selectedAssistant
        )
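
      // Assemble the payload sent to the chat backend: settings, workspace
      // instructions, message history, and any retrieved file items.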
      let payload: ChatPayload = {
        chatSettings: chatSettings!,
        workspaceInstructions: selectedWorkspace!.instructions || "",
        chatMessages: isRegeneration
          ? [...chatMessages]
          : [...chatMessages, tempUserChatMessage],
        assistant: selectedChat?.assistant_id ? selectedAssistant : null,
        messageFileItems: retrievedFileItems,
        chatFileItems: chatFileItems
      }

      let generatedText = ""
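
      // Route the request: tool-enabled chats go through /api/chat/tools,
      // Ollama models go through the local chat handler, everything else
      // through the hosted chat handler.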
      if (selectedTools.length > 0) {
        setToolInUse("Tools")

        const formattedMessages = await buildFinalMessages(
          payload,
          profile!,
          chatImages
        )

        const response = await fetch("/api/chat/tools", {
          method: "POST",
          headers: {
            "Content-Type": "application/json"
          },
          body: JSON.stringify({
            chatSettings: payload.chatSettings,
            messages: formattedMessages,
            selectedTools
          })
        })

        setToolInUse("none")

        generatedText = await processResponse(
          response,
          isRegeneration
            ? payload.chatMessages[payload.chatMessages.length - 1]
            : tempAssistantChatMessage,
          true,
          newAbortController,
          setFirstTokenReceived,
          setChatMessages,
          setToolInUse
        )
      } else {
        if (modelData!.provider === "ollama") {
          generatedText = await handleLocalChat(
            payload,
            profile!,
            chatSettings!,
            tempAssistantChatMessage,
            isRegeneration,
            newAbortController,
            setIsGenerating,
            setFirstTokenReceived,
            setChatMessages,
            setToolInUse
          )
        } else {
          generatedText = await handleHostedChat(
            payload,
            profile!,
            modelData!,
            tempAssistantChatMessage,
            isRegeneration,
            newAbortController,
            newMessageImages,
            chatImages,
            setIsGenerating,
            setFirstTokenReceived,
            setChatMessages,
            setToolInUse
          )
        }
      }
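
      // Create the chat on first message; otherwise bump its updated_at
      // and refresh it in the chat list.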
      if (!currentChat) {
        currentChat = await handleCreateChat(
          chatSettings!,
          profile!,
          selectedWorkspace!,
          messageContent,
          selectedAssistant!,
          newMessageFiles,
          setSelectedChat,
          setChats,
          setChatFiles
        )
      } else {
        const updatedChat = await updateChat(currentChat.id, {
          updated_at: new Date().toISOString()
        })

        setChats(prevChats => {
          const updatedChats = prevChats.map(prevChat =>
            prevChat.id === updatedChat.id ? updatedChat : prevChat
          )

          return updatedChats
        })
      }
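
      // Persist the final user/assistant messages (with images and
      // retrieved file items) and update local message state.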
      await handleCreateMessages(
        chatMessages,
        currentChat,
        profile!,
        modelData!,
        messageContent,
        generatedText,
        newMessageImages,
        isRegeneration,
        retrievedFileItems,
        setChatMessages,
        setChatFileItems,
        setChatImages,
        selectedAssistant
      )

      setIsGenerating(false)
      setFirstTokenReceived(false)
    } catch (error) {
      // Reset generation state and restore the user's input so they can retry
      setIsGenerating(false)
      setFirstTokenReceived(false)
      setUserInput(startingInput)
    }
  }
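
  // Edits a previously sent message: deletes it and everything after it,
  // then resends the edited content as a new message.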
  const handleSendEdit = async (
    editedContent: string,
    sequenceNumber: number
  ) => {
    if (!selectedChat) return

    await deleteMessagesIncludingAndAfter(
      selectedChat.user_id,
      selectedChat.id,
      sequenceNumber
    )

    const filteredMessages = chatMessages.filter(
      chatMessage => chatMessage.message.sequence_number < sequenceNumber
    )

    setChatMessages(filteredMessages)

    handleSendMessage(editedContent, filteredMessages, false)
  }
  return {
    chatInputRef,
    handleNewChat,
    handleSendMessage,
    handleFocusChatInput,
    handleStopMessage,
    handleSendEdit
  }
}