fajarah committed on
Commit
452c0cc
·
verified ·
1 Parent(s): a1860ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -74
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import gradio as gr
 
2
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
3
  from torch.nn.functional import sigmoid
4
  import torch
@@ -43,6 +44,21 @@ def analyze_combined(text, threshold, image):
43
  final_label = text_label if img_label is None else img_label
44
  card = islamic_advice.get(final_label, islamic_advice["neutral"])
45
  return card
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  custom_css = """
48
  body {
@@ -62,80 +78,23 @@ body {
62
  .gr-button:hover {
63
  background-color: #2563eb !important;
64
  }
65
- .notion-card {
66
- background: #ffffff;
67
- border-radius: 12px;
68
- border: 1px solid #e5e7eb;
69
- padding: 16px;
70
- margin-top: 12px;
71
- box-shadow: 0 6px 20px rgba(0,0,0,0.05);
72
- max-width: 600px;
73
- margin-left: auto;
74
- margin-right: auto;
75
- animation: fadeInUp 0.6s ease-out;
76
- }
77
- .notion-card h3 {
78
- margin-top: 0;
79
- color: #111827;
80
- font-size: 1.25rem;
81
- margin-bottom: 8px;
82
- font-weight: 600;
83
- }
84
- .notion-card p {
85
- font-size: 1rem;
86
- color: #374151;
87
- margin: 0;
88
- }
89
- .emoji {
90
- text-align: center;
91
- font-size: 3rem;
92
- margin: 0;
93
- padding: 0;
94
- animation: popIn 0.5s ease;
95
- }
96
- .arabic {
97
- font-size: 1.3rem;
98
- direction: rtl;
99
- display: block;
100
- margin-top: 4px;
101
- margin-bottom: 4px;
102
- text-align: right;
103
- font-family: 'Scheherazade', serif;
104
- }
105
- @keyframes fadeInUp {
106
- 0% {
107
- opacity: 0;
108
- transform: translateY(20px);
109
- }
110
- 100% {
111
- opacity: 1;
112
- transform: translateY(0);
113
- }
114
- }
115
- @keyframes popIn {
116
- 0% {
117
- transform: scale(0.5);
118
- opacity: 0;
119
- }
120
- 100% {
121
- transform: scale(1);
122
- opacity: 1;
123
- }
124
- }
125
  """
126
 
127
- demo = gr.Interface(
128
- fn=analyze_combined,
129
- inputs=[
130
- gr.Textbox(lines=5, placeholder="Write a sentence or a full paragraph...", label="Your Text"),
131
- gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold"),
132
- gr.Image(type="pil", label="Upload Face Photo")
133
- ],
134
- outputs=gr.HTML(label="Detected Emotion & Islamic Insight"),
135
- title="🧠 EmotionLens: Understand & Reflect Emotionally",
136
- description="Analyze how you feel through text and facial expression, and receive inspirational Islamic wisdom in a beautifully styled reflection.",
137
- theme="default",
138
- css=custom_css
139
- )
 
 
 
140
 
141
- demo.launch()
 
1
  import gradio as gr
2
+ import pandas as pd
3
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoImageProcessor, AutoModelForImageClassification
4
  from torch.nn.functional import sigmoid
5
  import torch
 
44
  final_label = text_label if img_label is None else img_label
45
  card = islamic_advice.get(final_label, islamic_advice["neutral"])
46
  return card
47
+ return f"<h2 style='font-size:2rem;text-align:center;'>{final_label.capitalize()}</h2><p style='text-align:center;font-size:1.2rem;'>{advice}</p>"
48
+
def analyze_batch_csv(file):
    """Classify the emotion of every row of text in an uploaded CSV.

    Args:
        file: Uploaded file object (e.g. from a gradio ``File`` input);
            must expose a ``.name`` path to a CSV that contains a
            ``text`` column.

    Returns:
        pandas.DataFrame with one row per input text and the columns
        ``text``, ``emotion`` (lower-cased predicted label) and
        ``advice`` (currently just the capitalized label, mirroring the
        single-input path).

    Raises:
        ValueError: if the CSV has no ``text`` column.
    """
    df = pd.read_csv(file.name)
    if 'text' not in df.columns:
        # Fail with a clear message instead of an opaque KeyError below.
        raise ValueError("CSV must contain a 'text' column")
    results = []
    # NOTE(review): relies on module-level `tokenizer` and `model`
    # loaded elsewhere in app.py — confirm they are in scope.
    # Blank CSV cells arrive as float NaN; coerce every value to str so
    # the tokenizer does not raise on non-string input.
    for text in df['text'].fillna("").astype(str):
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            logits = model(**inputs).logits
        probs = sigmoid(logits)[0]
        # Reduce the multi-label probabilities to the single most likely label.
        idx = torch.argmax(probs).item()
        label = model.config.id2label[idx].lower()
        advice = label.capitalize()
        results.append({"text": text, "emotion": label, "advice": advice})
    return pd.DataFrame(results)
62
 
63
  custom_css = """
64
  body {
 
78
  .gr-button:hover {
79
  background-color: #2563eb !important;
80
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  """
82
 
# --- Gradio UI ------------------------------------------------------------
# Two-tab app: a single-input analyzer (text + optional face photo) and a
# CSV batch analyzer. `demo` must stay at module level so it can be launched.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("# 🧠 EmotionLens")
    gr.Markdown("Analyze your text and optionally a facial photo. Receive emotional insight and reflective Islamic advice.")

    with gr.Tab("Single Input"):
        single_text = gr.Textbox(lines=4, label="Text Input")
        single_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.3, step=0.05, label="Threshold")
        single_image = gr.Image(type="pil", label="Upload Face Photo (optional)")
        analyze_btn = gr.Button("Analyze Emotion")
        single_result = gr.HTML()
        # Run the combined text+image analysis when the button is pressed.
        analyze_btn.click(
            fn=analyze_combined,
            inputs=[single_text, single_threshold, single_image],
            outputs=single_result,
        )

    with gr.Tab("Batch Analysis"):
        csv_upload = gr.File(file_types=[".csv"], label="Upload CSV with 'text' column")
        batch_table = gr.Dataframe()
        # Re-analyze automatically whenever a new CSV is uploaded.
        csv_upload.change(fn=analyze_batch_csv, inputs=csv_upload, outputs=batch_table)

demo.launch()