from fastapi import APIRouter, HTTPException, Query
|
|
from pydantic import BaseModel
|
|
from app.core.embedding import embedder
|
|
from app.core.config import settings
|
|
from llama_index.vector_stores.faiss import FaissVectorStore
|
|
from llama_index import VectorStoreIndex, ServiceContext
|
|
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
|
|
import os
|
|
|
|
router = APIRouter()
|
|
|
|
class QueryRequest(BaseModel):
|
|
query: str
|
|
|
|
@router.post("/search")
|
|
def search_docs(request: QueryRequest, user_id: str = Query(..., description="用户ID")):
|
|
try:
|
|
index_path = os.path.join("index_data", f"{user_id}.index")
|
|
if not os.path.exists(index_path):
|
|
raise HTTPException(status_code=404, detail="用户索引不存在")
|
|
|
|
# 构建 LlamaIndex 检索器
|
|
faiss_store = FaissVectorStore.from_persist_path(index_path)
|
|
service_context = ServiceContext.from_defaults(embed_model=embedder)
|
|
index = VectorStoreIndex.from_vector_store(faiss_store, service_context=service_context)
|
|
|
|
# 检索结果(真实文本)
|
|
retriever = index.as_retriever(similarity_top_k=settings.TOP_K)
|
|
nodes = retriever.retrieve(request.query)
|
|
|
|
return {
|
|
"user_id": user_id,
|
|
"query": request.query,
|
|
"results": [
|
|
{"score": float(node.score or 0), "text": node.get_content()}
|
|
for node in nodes
|
|
]
|
|
}
|
|
|
|
except Exception as e:
|
|
raise HTTPException(status_code=500, detail=str(e))
|