import streamlit as st
import torch
from transformers import AutoTokenizer

from semviqa.SER.qatc_model import QATCForQuestionAnswering


# Load QATC Model
@st.cache_resource()
def load_qatc_model():
    tokenizer = AutoTokenizer.from_pretrained("xuandin/semviqa-qatc-vimrc-viwikifc")
    model = QATCForQuestionAnswering.from_pretrained("xuandin/semviqa-qatc-vimrc-viwikifc")
    return tokenizer, model


# Streamlit UI Configuration
st.set_page_config(page_title="SemViQA Demo", layout="wide")

# Improved UI Design
st.markdown("""
""", unsafe_allow_html=True)

st.markdown(
    "<h1>🔍 SemViQA: A Semantic Question Answering System "
    "for Vietnamese Information Fact-Checking</h1>",
    unsafe_allow_html=True,
)
st.markdown(
    "<p>Enter a claim and context to verify its accuracy</p>",
    unsafe_allow_html=True,
)

# Sidebar - Configuration Settings
st.sidebar.header("⚙️ Settings")
tfidf_threshold = st.sidebar.slider("🔧 TF-IDF Threshold", 0.0, 1.0, 0.5, 0.01)
length_ratio_threshold = st.sidebar.slider("📏 Length Ratio Threshold", 0.1, 1.0, 0.5, 0.01)
qatc_model = st.sidebar.selectbox("🤖 Select QATC Model", ["xuandin/semviqa-qatc-vimrc-viwikifc"])

# User Input Fields
claim = st.text_area("✍️ Enter Claim", "Vietnam is a country in Southeast Asia.")
context = st.text_area(
    "📖 Enter Context",
    "Vietnam is a country located in Southeast Asia, covering an area of over 331,000 km² "
    "with a population of more than 98 million people.",
)

if st.button("🔎 Verify"):
    tokenizer, model = load_qatc_model()

    # Encode the claim/context pair and run the QATC model without tracking gradients
    inputs = tokenizer(claim, context, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)

    # Take the most likely start/end positions and decode the evidence span between them
    start_idx = torch.argmax(outputs.start_logits)
    end_idx = torch.argmax(outputs.end_logits)
    tokens = inputs["input_ids"][0][start_idx : end_idx + 1]
    evidence_result = tokenizer.decode(tokens, skip_special_tokens=True)

    # Display the extracted evidence
    st.markdown(
        """
        <h3>📌 Result</h3>
        <p>🔍 <b>Evidence:</b> {}</p>
        """.format(evidence_result),
        unsafe_allow_html=True,
    )