"""Gradio app that scores query-document relevance with a cross-encoder.

Loads the ms-marco MiniLM cross-encoder once at import time and exposes a
two-textbox UI that returns the model's raw relevance logit for the pair.
"""

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load model and tokenizer once, at module import time, so every request
# reuses the same weights instead of reloading per call.
model_name = "cross-encoder/ms-marco-MiniLM-L-12-v2"
print("Loading model and tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only: disables dropout / batch-norm updates
print("Model and tokenizer loaded successfully.")


def get_relevance_score(query, paragraph):
    """Return the cross-encoder relevance logit for (query, paragraph).

    Args:
        query: The search query text.
        paragraph: The candidate document paragraph.

    Returns:
        The model's relevance logit rounded to 4 decimal places (float),
        or an instruction string when either input is blank (the string is
        shown directly in the output textbox).
    """
    if not query.strip() or not paragraph.strip():
        return "Please provide both a query and a document paragraph."

    print(f"Received inputs -> Query: {query}, Paragraph: {paragraph}")

    # Cross-encoders consume the query/paragraph pair as one joint sequence.
    inputs = tokenizer(query, paragraph, return_tensors="pt", truncation=True, padding=True)

    # No gradient tracking needed for inference.
    with torch.no_grad():
        score = model(**inputs).logits.squeeze().item()

    print(f"Calculated score: {score}")
    return round(score, 4)


def test_function(query, paragraph):
    """Debug helper that echoes its inputs; kept only for manual UI smoke tests."""
    return f"Received query: {query}, paragraph: {paragraph}"


# Define Gradio interface.
# BUG FIX: the interface was wired to test_function (a debugging leftover
# with placeholder title/description), so the app never computed a relevance
# score. It now calls get_relevance_score, the actual purpose of this app.
interface = gr.Interface(
    fn=get_relevance_score,
    inputs=[gr.Textbox(label="Query"), gr.Textbox(label="Document Paragraph")],
    outputs=gr.Textbox(label="Output"),
    title="Query-Document Relevance Scorer",
    description="Enter a query and a document paragraph to get a cross-encoder relevance score.",
)

if __name__ == "__main__":
    print("Launching Gradio app...")
    interface.launch(share=True)