File size: 969 Bytes
419f138
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import gradio as gr
import torch
from transformers import AutoModel, AutoModelForSequenceClassification, AutoTokenizer

# Load the classification model and tokenizer from the Hugging Face Hub.
# NOTE: the bare AutoModel returns a BaseModelOutput with no `.logits`
# attribute, but predict() below reads `outputs.logits` — a sequence-
# classification head is required for this script to work at all.
model = AutoModelForSequenceClassification.from_pretrained("comethrusws/finlytic-compliance")
tokenizer = AutoTokenizer.from_pretrained("comethrusws/finlytic-compliance")

# Define a function to handle inference
def predict(input_data):
    """Return the predicted class index for *input_data*.

    Tokenizes the text, runs a single forward pass, and takes the argmax
    over the output logits.  Assumes `model` exposes `.logits` on its
    output (i.e. it carries a classification head).
    """
    # Truncate so inputs longer than the model's max length don't crash.
    inputs = tokenizer(input_data, return_tensors="pt", truncation=True)
    # Inference only: disable autograd bookkeeping to save memory/compute.
    with torch.no_grad():
        outputs = model(**inputs)
    prediction = torch.argmax(outputs.logits, dim=-1).item()
    return prediction

# Create a Gradio interface.
# gr.inputs.* / gr.outputs.* were deprecated in Gradio 3.x and removed in
# 4.x — components are constructed directly from the top-level namespace.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Input Data"),
    outputs=gr.Textbox(label="Prediction"),
    title="Finlytic Compliance Model",  # fixed "Fintlytic" typo for consistency
    description="Predict using the Finlytic compliance model",
)

# Launch the Gradio app only when run as a script (not on import),
# so the module can be imported without starting a web server.
if __name__ == "__main__":
    interface.launch()