import streamlit as st
import torch
from transformers import AutoTokenizer
from semviqa.ser.qatc_model import QATCForQuestionAnswering
from semviqa.tvc.model import ClaimModelForClassification
from semviqa.ser.ser_eval import extract_evidence_tfidf_qatc
from semviqa.tvc.tvc_eval import classify_claim
import time
import pandas as pd

# Load models with caching
@st.cache_resource()
def load_model(model_name, model_class, is_bc=False):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = model_class.from_pretrained(model_name, num_labels=2 if is_bc else 3)
    model.eval()
    return tokenizer, model

# Set up page configuration
st.set_page_config(page_title="SemViQA Demo", layout="wide")

# Custom CSS and JavaScript to make the sidebar sticky
st.markdown(
    """
    """,
    unsafe_allow_html=True,
)

# Container for the page header
with st.container():
    st.markdown(
        "SemViQA: A Semantic Question Answering System for Vietnamese Information Fact-Checking",
        unsafe_allow_html=True,
    )
    st.markdown(
        "Enter the claim and context to verify its accuracy",
        unsafe_allow_html=True,
    )

# Sidebar: Global Settings
with st.sidebar.expander("⚙️ Settings", expanded=True):
    tfidf_threshold = st.slider("TF-IDF Threshold", 0.0, 1.0, 0.5, 0.01)
    length_ratio_threshold = st.slider("Length Ratio Threshold", 0.1, 1.0, 0.5, 0.01)
    qatc_model_name = st.selectbox("QATC Model", [
        "SemViQA/qatc-infoxlm-viwikifc",
        "SemViQA/qatc-infoxlm-isedsc01",
        "SemViQA/qatc-vimrc-viwikifc",
        "SemViQA/qatc-vimrc-isedsc01",
    ])
    bc_model_name = st.selectbox("Binary Classification Model", [
        "SemViQA/bc-xlmr-viwikifc",
        "SemViQA/bc-xlmr-isedsc01",
        "SemViQA/bc-infoxlm-viwikifc",
        "SemViQA/bc-infoxlm-isedsc01",
        "SemViQA/bc-erniem-viwikifc",
        "SemViQA/bc-erniem-isedsc01",
    ])
    tc_model_name = st.selectbox("3-Class Classification Model", [
        "SemViQA/tc-xlmr-viwikifc",
        "SemViQA/tc-xlmr-isedsc01",
        "SemViQA/tc-infoxlm-viwikifc",
        "SemViQA/tc-infoxlm-isedsc01",
        "SemViQA/tc-erniem-viwikifc",
        "SemViQA/tc-erniem-isedsc01",
    ])
    show_details = st.checkbox("Show Probability Details", value=False)

# Store verification history
if 'history' not in st.session_state:
    st.session_state.history = []
if 'latest_result' not in st.session_state:
    st.session_state.latest_result = None

# Load the selected models
tokenizer_qatc, model_qatc = load_model(qatc_model_name, QATCForQuestionAnswering)
tokenizer_bc, model_bc = load_model(bc_model_name, ClaimModelForClassification, is_bc=True)
tokenizer_tc, model_tc = load_model(tc_model_name, ClaimModelForClassification)

# Icons for results
verdict_icons = {
    "SUPPORTED": "✅",
    "REFUTED": "❌",
    "NEI": "⚠️",
}

# Tabs: Verify, History, About
tabs = st.tabs(["Verify", "History", "About"])

# --- Tab Verify ---
with tabs[0]:
    st.subheader("Verify a Claim")

    # 2-column layout: input on the left, results on the right
    col_input, col_result = st.columns([2, 1])

    with col_input:
        claim = st.text_area("Enter Claim", "Vietnam is a country in Southeast Asia.")
        context = st.text_area("Enter Context", "Vietnam is a country located in Southeast Asia, covering an area of over 331,000 km² with a population of more than 98 million people.")
        verify_button = st.button("Verify", key="verify_button")

    with col_result:
        st.markdown("### Result")

        if verify_button:
            device = "cuda" if torch.cuda.is_available() else "cpu"

            with torch.no_grad():
                # Evidence retrieval: TF-IDF pre-filtering followed by QATC extraction
                evidence_start_time = time.time()
                evidence = extract_evidence_tfidf_qatc(
                    claim, context, model_qatc, tokenizer_qatc, device,
                    confidence_threshold=tfidf_threshold,
                    length_ratio_threshold=length_ratio_threshold,
                )
                evidence_time = time.time() - evidence_start_time

                # Verdict classification: 3-class model first, binary model to separate SUPPORTED/REFUTED
                verdict_start_time = time.time()
                verdict = "NEI"
                details = ""
                prob3class, pred_tc = classify_claim(claim, evidence, model_tc, tokenizer_tc, device)
                if pred_tc != 0:
                    prob2class, pred_bc = classify_claim(claim, evidence, model_bc, tokenizer_bc, device)
                    if pred_bc == 0:
                        verdict = "SUPPORTED"
                    elif prob2class > prob3class:
                        verdict = "REFUTED"
                    else:
                        verdict = ["NEI", "SUPPORTED", "REFUTED"][pred_tc]
                if show_details:
                    details = f"""
                        3-Class Probability: {prob3class.item():.2f}<br>
                        3-Class Predicted Label: {['NEI', 'SUPPORTED', 'REFUTED'][pred_tc]}<br>
                    """
                    # The binary classifier only runs when the 3-class model predicts a non-NEI label
                    if pred_tc != 0:
                        details += f"""
                        2-Class Probability: {prob2class.item():.2f}<br>
                        2-Class Predicted Label: {['SUPPORTED', 'REFUTED'][pred_bc]}<br>
                        """
                verdict_time = time.time() - verdict_start_time

            # Store verification history and the latest result
            st.session_state.history.append({
                "claim": claim,
                "evidence": evidence,
                "verdict": verdict,
                "evidence_time": evidence_time,
                "verdict_time": verdict_time,
                "details": details,
            })
            st.session_state.latest_result = {
                "claim": claim,
                "evidence": evidence,
                "verdict": verdict,
                "evidence_time": evidence_time,
                "verdict_time": verdict_time,
                "details": details,
            }

            # Release cached GPU memory after each run
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            # Display the result after verification
            res = st.session_state.latest_result
            st.markdown(f"""
                Claim: {res['claim']}<br>
                Evidence: {res['evidence']}<br>
                Evidence Inference Time: {res['evidence_time']:.2f} seconds<br>
                Verdict Inference Time: {res['verdict_time']:.2f} seconds<br>
                {res['details']}
                {verdict_icons.get(res['verdict'], '')} {res['verdict']}
            """, unsafe_allow_html=True)
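
# Minimal sketch of the "History" tab declared above (an assumption, not the
# original implementation): it reuses the pandas import and the dicts appended
# to st.session_state.history after each verification.
with tabs[1]:
    st.subheader("Verification History")
    if st.session_state.history:
        st.dataframe(pd.DataFrame(st.session_state.history))
    else:
        st.info("No verifications yet.")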