syedmoinms committed on
Commit e2621cd · verified · 1 Parent(s): a0119ea

Create app.py

Files changed (1)
  1. app.py +31 -0
app.py ADDED
@@ -0,0 +1,31 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ # Load fine-tuned model from Hugging Face
+ model_name = "syedmoinms/MoinRomanticBot"  # ✅ correct model path
+
+ try:
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+ except Exception as e:
+     print(f"❌ Error loading model: {e}")
+     raise SystemExit(1)
+
+ # Function to generate a response
+ def chatbot(input_text):
+     inputs = tokenizer(input_text, return_tensors="pt").to(model.device)  # move inputs to the model's device
+
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_length=150)
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     return response
+
+ # Gradio interface
+ iface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Moin Romantic Bot")
+
+ # Launch app
+ if __name__ == "__main__":
+     iface.launch(server_name="0.0.0.0", server_port=7860)