Nerva5678 committed on
Commit
c61a210
·
verified ·
1 Parent(s): 8c7b4c7

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -0
app.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import pandas as pd
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
# Bug fix: RetrievalQA is used below but was never imported (NameError at runtime).
from langchain.chains import RetrievalQA
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

st.title("🤖 Excel 問答 AI(ChatGLM 驅動)")
st.markdown("上傳 Excel(A欄:問題,B欄:答案),開始提問吧!")

uploaded_file = st.file_uploader("上傳你的問答 Excel", type=["xlsx"])


@st.cache_resource(show_spinner=False)
def _load_llm():
    """Load ChatGLM3-6B once and wrap it as a LangChain LLM.

    Cached with ``st.cache_resource`` so Streamlit reruns (triggered by every
    widget interaction) do not reload the 6B-parameter model each time.
    """
    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
    return HuggingFacePipeline(pipeline=pipe)


if uploaded_file:
    df = pd.read_excel(uploaded_file)
    # The uploaded sheet must provide the question/answer columns 問題 / 答案.
    if not {'問題', '答案'}.issubset(df.columns):
        st.error("Excel 檔案需包含 '問題' 和 '答案' 欄位")
    else:
        # One retrieval document per Q/A row of the spreadsheet.
        texts = [f"問題:{q}\n答案:{a}" for q, a in zip(df['問題'], df['答案'])]
        st.info("正在建立向量資料庫...")
        embeddings = HuggingFaceEmbeddings(model_name="shibing624/text2vec-base-chinese")
        vectorstore = FAISS.from_texts(texts, embedding=embeddings)

        st.info("正在載入 ChatGLM 模型...")
        llm = _load_llm()

        # "stuff" chain: retrieved Q/A documents are stuffed directly into the prompt.
        qa = RetrievalQA.from_chain_type(llm=llm, retriever=vectorstore.as_retriever(), chain_type="stuff")

        query = st.text_input("請輸入你的問題:")
        if query:
            with st.spinner("AI 回答中..."):
                result = qa.run(query)
                st.success(result)