Nitin00043 committed
Commit adc05de · verified · 1 Parent(s): 07f809d

Update app.py

Files changed (1)
  1. app.py +49 -17
app.py CHANGED
@@ -1,38 +1,70 @@
-from transformers import pipeline
+from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
 import gradio as gr
+import torch
+from concurrent.futures import ThreadPoolExecutor
 
-# Load sentiment and emotion models
-sentiment_model = "cardiffnlp/twitter-roberta-base-sentiment"
-emotion_model = "bhadresh-savani/bert-base-uncased-emotion"
+# Load models with quantization (8-bit) for faster inference
+def load_quantized_model(model_name):
+    model = AutoModelForSequenceClassification.from_pretrained(model_name, load_in_8bit=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    return pipeline("text-classification", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
 
-sentiment_pipeline = pipeline("sentiment-analysis", model=sentiment_model, tokenizer=sentiment_model)
-emotion_pipeline = pipeline("text-classification", model=emotion_model, tokenizer=emotion_model)
+# Load models in parallel during startup
+with ThreadPoolExecutor() as executor:
+    sentiment_future = executor.submit(load_quantized_model, "cardiffnlp/twitter-roberta-base-sentiment")
+    emotion_future = executor.submit(load_quantized_model, "bhadresh-savani/bert-base-uncased-emotion")
+
+sentiment_pipeline = sentiment_future.result()
+emotion_pipeline = emotion_future.result()
+
+# Cache recent predictions to avoid recomputation
+CACHE_SIZE = 100
+prediction_cache = {}
 
-# Function to analyze sentiment and emotion
 def analyze_text(text):
-    sentiment_result = sentiment_pipeline(text)[0]
-    emotion_result = emotion_pipeline(text)[0]
+    # Check cache first
+    if text in prediction_cache:
+        return prediction_cache[text]
+
+    # Parallel model execution
+    with ThreadPoolExecutor() as executor:
+        sentiment_future = executor.submit(sentiment_pipeline, text)
+        emotion_future = executor.submit(emotion_pipeline, text)
+
+    sentiment_result = sentiment_future.result()[0]
+    emotion_result = emotion_future.result()[0]
 
-    return {
-        "Sentiment": {sentiment_result['label']: sentiment_result['score']},
-        "Emotion": {emotion_result['label']: emotion_result['score']}
+    # Format response
+    result = {
+        "Sentiment": {sentiment_result['label']: round(sentiment_result['score'], 4)},
+        "Emotion": {emotion_result['label']: round(emotion_result['score'], 4)}
     }
+
+    # Update cache
+    if len(prediction_cache) >= CACHE_SIZE:
+        prediction_cache.pop(next(iter(prediction_cache)))
+    prediction_cache[text] = result
+
+    return result
 
-# Gradio interface
+# Optimized Gradio interface with batch processing
 demo = gr.Interface(
     fn=analyze_text,
     inputs=gr.Textbox(placeholder="Enter your text here...", label="Input Text"),
     outputs=gr.Label(label="Analysis Results"),
-    title="Sentiment and Emotion Analysis",
-    description="Analyze the sentiment and emotion of your text using advanced NLP models.",
+    title="🚀 Fast Sentiment & Emotion Analysis",
+    description="Optimized version using quantized models and parallel processing",
     examples=[
         ["I'm thrilled to start this new adventure!"],
         ["This situation is making me really frustrated."],
         ["I feel so heartbroken and lost."]
     ],
-    theme="soft"
+    theme="soft",
+    allow_flagging="never"
 )
 
-# Deploy the application
+# Warm up models with sample input
+analyze_text("Warming up models...")
+
 if __name__ == "__main__":
     demo.launch()
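
Two caveats on the quantized loader are worth flagging for anyone adapting this commit. First, load_in_8bit=True requires the bitsandbytes package and a CUDA GPU, so on a CPU-only Space the call fails at startup. Second, an 8-bit model is placed on devices by accelerate, and also passing a device argument to pipeline() can raise an error in recent transformers releases. A minimal defensive variant, as a sketch rather than part of the commit (load_model_safely is a hypothetical name):

    import importlib.util
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

    def load_model_safely(model_name):
        # Hypothetical fallback loader: use 8-bit weights only when both
        # CUDA and bitsandbytes are available; otherwise load full precision.
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        if torch.cuda.is_available() and importlib.util.find_spec("bitsandbytes"):
            model = AutoModelForSequenceClassification.from_pretrained(model_name, load_in_8bit=True)
            # accelerate has already placed the quantized model; no device argument here
            return pipeline("text-classification", model=model, tokenizer=tokenizer)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        return pipeline("text-classification", model=model, tokenizer=tokenizer, device=-1)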
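
The hand-rolled prediction_cache evicts the oldest inserted key (FIFO, via insertion-ordered dicts) and shares a plain dict across Gradio's request threads. If least-recently-used behavior is wanted, functools.lru_cache from the standard library gives it in one decorator; a sketch assuming the sentiment_pipeline and emotion_pipeline globals from this commit, with the manual cache bookkeeping (and, for brevity, the parallel execution) dropped:

    from functools import lru_cache

    @lru_cache(maxsize=100)  # LRU eviction; bookkeeping handled internally
    def analyze_text(text):
        # Same output format as the commit's version, without manual caching.
        sentiment_result = sentiment_pipeline(text)[0]
        emotion_result = emotion_pipeline(text)[0]
        return {
            "Sentiment": {sentiment_result['label']: round(sentiment_result['score'], 4)},
            "Emotion": {emotion_result['label']: round(emotion_result['score'], 4)}
        }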
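
Finally, analyze_text spins up a fresh ThreadPoolExecutor on every request, which adds thread startup cost to each call. A module-level pool created once can be reused instead; run_both below is a hypothetical helper, again assuming the two pipeline globals:

    from concurrent.futures import ThreadPoolExecutor

    EXECUTOR = ThreadPoolExecutor(max_workers=2)  # created once at import time

    def run_both(text):
        # Submit both pipelines to the shared pool rather than creating
        # a new executor per call.
        sentiment_future = EXECUTOR.submit(sentiment_pipeline, text)
        emotion_future = EXECUTOR.submit(emotion_pipeline, text)
        return sentiment_future.result()[0], emotion_future.result()[0]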