Upload 2 files
- .gitattributes +1 -0
- app.py +45 -6
- family.jpg +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+family.jpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -30,7 +30,7 @@ index = faiss.IndexFlatL2(dimension)
 index.add(np.array(embeddings))
 
 # Build a LangChain-compatible VectorStore
-from langchain.docstore.in_memory import InMemoryDocstore
+from langchain_community.docstore.in_memory import InMemoryDocstore
 from langchain_community.embeddings import HuggingFaceEmbeddings
 
 index_to_docstore_id = {i: str(i) for i in range(len(docs))}
@@ -67,7 +67,6 @@ index = faiss.IndexFlatL2(dimension)
 index.add(np.array(embeddings))
 
 # Build a LangChain-compatible VectorStore
-from langchain.docstore.in_memory import InMemoryDocstore
 from langchain_community.embeddings import HuggingFaceEmbeddings
 
 index_to_docstore_id = {i: str(i) for i in range(len(docs))}
@@ -84,7 +83,7 @@ vectorstore = FAISS(
 retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
 
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-from langchain.llms import HuggingFacePipeline
+from langchain_community.llms import HuggingFacePipeline
 from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import PromptTemplate
@@ -126,6 +125,40 @@ custom_prompt = PromptTemplate(
 
 
 import gradio as gr
+css = """
+/* Background image */
+.gradio-container {
+    background-image: url("/assets/family.jpg"); /* relative path */
+    background-size: cover;
+    background-position: center;
+    height: 100vh; /* full-screen background */
+}
+/* Anime-style buttons */
+.gr-button {
+    background-color: #ff69b4; /* pink background */
+    border-radius: 50px; /* rounded button */
+    font-size: 18px;
+    font-weight: bold;
+    color: white;
+    transition: all 0.3s ease-in-out;
+    padding: 12px 20px;
+}
+.gr-button:hover {
+    background-color: #ff1493; /* colour change on hover */
+    transform: scale(1.1); /* enlarge the button */
+}
+/* "Thinking" animation */
+.thinking {
+    animation: spin 2s infinite linear;
+}
+@keyframes spin {
+    0% { transform: rotate(0deg); }
+    50% { transform: rotate(180deg); }
+    100% { transform: rotate(360deg); }
+}
+...
+"""
+
 # Build the multi-turn QA chain
 qa_chain = ConversationalRetrievalChain.from_llm(
     llm=llm,
@@ -138,9 +171,15 @@ qa_chain = ConversationalRetrievalChain.from_llm(
 def chat(user_input, history):
     history = history or []
     chat_history = [(q, a) for q, a in history]
-
-
-
+
+    try:
+        # Use qa_chain to generate the answer (no separate retrieval step; answer directly from the corpus)
+        result = qa_chain.invoke({"question": user_input, "chat_history": chat_history})
+        reply = result.get("answer", "勾巴,我也不知道!!!")
+    except Exception as e:
+        reply = f"唔sophia累了,可能要犯错了: {str(e)}"
+
+    # Update the conversation history
     history.append((user_input, reply))
     return history, history
 
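For context, the import changes above all point at the same piece of app.py: the hand-built FAISS vector store that wraps index, docs, and embeddings. Below is a minimal, self-contained sketch of that assembly using the migrated langchain_community imports; the corpus, the embedding model name, and the set-up variables are stand-ins, not taken from the diff.

import faiss
import numpy as np
from langchain_core.documents import Document
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Stand-in corpus and embedding model (assumptions; app.py defines its own).
docs = ["example passage one", "example passage two"]
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Embed the corpus and fill a flat L2 index, as app.py does.
embeddings = embedding_model.embed_documents(docs)
index = faiss.IndexFlatL2(len(embeddings[0]))
index.add(np.array(embeddings, dtype="float32"))

# Wrap the raw index in a LangChain-compatible VectorStore.
docstore = InMemoryDocstore({str(i): Document(page_content=d) for i, d in enumerate(docs)})
index_to_docstore_id = {i: str(i) for i in range(len(docs))}
vectorstore = FAISS(
    embedding_function=embedding_model,
    index=index,
    docstore=docstore,
    index_to_docstore_id=index_to_docstore_id,
)
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

The FAISS(...) keyword arguments mirror the index_to_docstore_id and as_retriever(search_kwargs={"k": 3}) lines that already appear as context in the diff.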
family.jpg ADDED
(binary image, stored via Git LFS)
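The diff stops short of showing how the new css string, the reworked chat() function, and the uploaded family.jpg are attached to the Gradio UI. One hedged sketch of that wiring follows; the component labels, layout, and launch arguments are assumptions, not part of this commit.

import gradio as gr

# Sketch only: `css` and `chat` are the objects defined in app.py above.
with gr.Blocks(css=css) as demo:          # inject the custom CSS block
    chatbot = gr.Chatbot()                # renders the (question, answer) pairs
    state = gr.State([])                  # persistent chat history
    msg = gr.Textbox(placeholder="Ask something...")
    send = gr.Button("Send")              # picks up the .gr-button styling

    # chat(user_input, history) returns (history, history), matching these outputs.
    send.click(chat, inputs=[msg, state], outputs=[chatbot, state])

# The CSS points at /assets/family.jpg; the background only appears if the Space
# actually serves the file at that URL. allowed_paths merely whitelists the file.
demo.launch(allowed_paths=["family.jpg"])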