Upload 3 files
- app.py +151 -0
- requirements.txt +6 -0
- wxid_818dcjgh2rie12_0_7235.json +0 -0
app.py
ADDED
@@ -0,0 +1,151 @@
from sentence_transformers import SentenceTransformer
from langchain.vectorstores import FAISS
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain_community.embeddings import HuggingFaceEmbeddings
import faiss
import numpy as np
import json

# Load the exported WeChat conversation (the JSON file uploaded with this Space)
file_path = "wxid_818dcjgh2rie12_0_7235.json"
try:
    with open(file_path, 'r', encoding='utf-8') as f:
        chunks = json.load(f)
except FileNotFoundError:
    print(f"File not found: {file_path}")
    raise SystemExit(1)
except json.JSONDecodeError:
    print(f"Error decoding JSON from file: {file_path}")
    raise SystemExit(1)

# Each chunk is assumed to be a dict with a 'text' key holding the message text
docs = [Document(page_content=chunk.get('text', '')) for chunk in chunks]

# Load the embedding model and embed every message
model = SentenceTransformer("BAAI/bge-base-zh")
embeddings = model.encode([doc.page_content for doc in docs], show_progress_bar=True)

# Build the FAISS index over the embeddings
dimension = embeddings.shape[1]
index = faiss.IndexFlatL2(dimension)
index.add(np.array(embeddings))

# Wrap the index in a LangChain-compatible VectorStore
index_to_docstore_id = {i: str(i) for i in range(len(docs))}
docstore = {str(i): doc for i, doc in enumerate(docs)}

vectorstore = FAISS(
    embedding_function=HuggingFaceEmbeddings(model_name='BAAI/bge-base-zh').embed_query,
    index=index,
    docstore=InMemoryDocstore(docstore),
    index_to_docstore_id=index_to_docstore_id
)

# Build the retriever (top-3 most similar chunks per query)
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# Load the chat model
model_name = "Qwen/Qwen1.5-0.5B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).eval()

# Build the text-generation pipeline and wrap it for LangChain
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
llm = HuggingFacePipeline(pipeline=pipe)

# Prompt template: reply as a playful WeChat friend, mimicking the tone of the
# other party (the speaker with is_sender = 0) in the retrieved chat snippets
custom_prompt = PromptTemplate.from_template(
    """
你是一个可爱的微信好友,请模仿以下对话中的语气,特别是“对方”(即 is_sender = 0 的说话者)的说话风格。
你的语气要俏皮、有点可爱、适度调侃,不要太正式。使用微信风格的口语表达,不用太长!

以下是之前的微信聊天片段:
{context}

现在我说:
{question}

你应该怎么用这种风格来回复我?
"""
)

# Build the multi-turn QA chain; ConversationalRetrievalChain expects the memory
# to expose its history under the key "chat_history"
qa_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=ConversationBufferMemory(memory_key="chat_history", return_messages=True),
    combine_docs_chain_kwargs={"prompt": custom_prompt},
    return_source_documents=False
)
import gradio as gr

# Chat callback: run one turn through the chain and append it to the visible history
def chat(user_input, history):
    history = history or []
    chat_history = [(q, a) for q, a in history]

    result = qa_chain.invoke({"question": user_input, "chat_history": chat_history})
    reply = result["answer"]
    history.append((user_input, reply))
    return history, history

# Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎀 Sophia Chat Agent")
    gr.Markdown("这是 **Sophia Jr**,来和笨笨认识一下吧!😄")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="请输入你的话...", placeholder="跟 Sophia 聊聊吧", lines=2)
    state = gr.State([])

    send_btn = gr.Button("发送")

    send_btn.click(chat, inputs=[msg, state], outputs=[chatbot, state])
    msg.submit(chat, inputs=[msg, state], outputs=[chatbot, state])

# share=True is ignored on Spaces; a plain launch() is all that's needed
demo.launch()
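Before the chain is wired into Gradio, it can help to sanity-check retrieval on its own. A minimal sketch against the retriever built in app.py above (the sample query is illustrative, not from the commit):

# Print the top-3 chunks retrieved for a sample query
sample_query = "周末打算干嘛"  # illustrative query, not from the commit
for doc in retriever.get_relevant_documents(sample_query):
    print(doc.page_content)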
requirements.txt
ADDED
@@ -0,0 +1,6 @@
gradio==4.15.0
transformers>=4.36.2
langchain>=0.1.0
sentence-transformers
faiss-cpu
huggingface-hub
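Note that app.py imports HuggingFaceEmbeddings from langchain_community. The langchain 0.1.x releases pull that package in as a dependency, but since the pin here is only langchain>=0.1.0, a resolved version might not. A guarded import (a sketch, not part of the commit) surfaces that failure early:

try:
    from langchain_community.embeddings import HuggingFaceEmbeddings
except ImportError:
    # Older langchain releases expose the same class under langchain.embeddings
    from langchain.embeddings import HuggingFaceEmbeddings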
wxid_818dcjgh2rie12_0_7235.json
ADDED
The diff for this file is too large to render.
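Although the export is too large to render, app.py shows the schema it assumes: each record is a dict with a 'text' field, and the prompt references an is_sender flag. A short sketch (assuming that schema) for inspecting the first few records locally:

import json

with open("wxid_818dcjgh2rie12_0_7235.json", encoding="utf-8") as f:
    records = json.load(f)

# Peek at a few records to confirm the assumed keys ('text', 'is_sender')
for rec in records[:3]:
    print(rec.get("is_sender"), rec.get("text", ""))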