Nitin00043 committed
Commit ec7d971 · verified · 1 Parent(s): 5924948

Update app.py

Files changed (1)
  1. app.py +26 -87
app.py CHANGED
@@ -1,99 +1,38 @@
- from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
  import gradio as gr
- import torch
- from concurrent.futures import ThreadPoolExecutor
- from threading import Lock
-
- # Global cache and lock for thread-safety
- CACHE_SIZE = 100
- prediction_cache = {}
- cache_lock = Lock()
-
- # Mapping for sentiment labels from cardiffnlp/twitter-roberta-base-sentiment
- SENTIMENT_LABEL_MAPPING = {
-     "LABEL_0": "negative",
-     "LABEL_1": "neutral",
-     "LABEL_2": "positive"
- }
-
- def load_model(model_name):
-     """
-     Loads the model with 8-bit quantization if a GPU is available;
-     otherwise, loads the full model.
-     """
-     if torch.cuda.is_available():
-         model = AutoModelForSequenceClassification.from_pretrained(
-             model_name, load_in_8bit=True, device_map="auto"
-         )
-         tokenizer = AutoTokenizer.from_pretrained(model_name)
-         device = 0  # GPU index
-     else:
-         model = AutoModelForSequenceClassification.from_pretrained(model_name)
-         tokenizer = AutoTokenizer.from_pretrained(model_name)
-         device = -1
-     return pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
-
- # Load both models concurrently at startup.
- with ThreadPoolExecutor() as executor:
-     sentiment_future = executor.submit(load_model, "cardiffnlp/twitter-roberta-base-sentiment")
-     emotion_future = executor.submit(load_model, "bhadresh-savani/bert-base-uncased-emotion")
-
- sentiment_pipeline = sentiment_future.result()
- emotion_pipeline = emotion_future.result()
-
- def analyze_text(text):
-     # Check cache first (thread-safe)
-     with cache_lock:
-         if text in prediction_cache:
-             return prediction_cache[text]
-
-     try:
-         # Run both model inferences in parallel.
-         with ThreadPoolExecutor() as executor:
-             future_sentiment = executor.submit(sentiment_pipeline, text)
-             future_emotion = executor.submit(emotion_pipeline, text)
-             sentiment_result = future_sentiment.result()[0]
-             emotion_result = future_emotion.result()[0]
-
-         # Remap the sentiment label to a human-readable format if available.
-         raw_sentiment_label = sentiment_result.get("label", "")
-         sentiment_label = SENTIMENT_LABEL_MAPPING.get(raw_sentiment_label, raw_sentiment_label)
-
-         # Format the output with rounded scores.
-         result = {
-             "Sentiment": {sentiment_label: round(sentiment_result['score'], 4)},
-             "Emotion": {emotion_result['label']: round(emotion_result['score'], 4)}
-         }
-     except Exception as e:
-         result = {"error": str(e)}
-
-     # Update the cache in a thread-safe manner.
-     with cache_lock:
-         if len(prediction_cache) >= CACHE_SIZE:
-             prediction_cache.pop(next(iter(prediction_cache)))
-         prediction_cache[text] = result
-
-     return result
-
- # Define the Gradio interface.
  demo = gr.Interface(
-     fn=analyze_text,
-     inputs=gr.Textbox(placeholder="Enter your text here...", label="Input Text"),
-     outputs=gr.JSON(label="Analysis Results"),
-     title="🚀 Fast Sentiment & Emotion Analysis",
-     description="Optimized application that remaps sentiment labels and uses parallel processing.",
      examples=[
-         ["I'm thrilled to start this new adventure!"],
-         ["This situation is making me really frustrated."],
-         ["I feel so heartbroken and lost."]
      ],
-     theme="soft",
-     allow_flagging="never"
  )

- # Warm up the models with a sample input.
- _ = analyze_text("Warming up models...")
-
  if __name__ == "__main__":
-     # Bind to all interfaces for Hugging Face Spaces.
-     demo.launch(server_name="0.0.0.0", server_port=7860)
+ from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
  import gradio as gr
+ from PIL import Image
+
+ # Load the pre-trained Pix2Struct model and processor
+ model_name = "google/pix2struct-mathqa-base"
+ model = Pix2StructForConditionalGeneration.from_pretrained(model_name)
+ processor = Pix2StructProcessor.from_pretrained(model_name)
+
+ # Function to solve handwritten math problems
+ def solve_math_problem(image):
+     # Preprocess the image
+     inputs = processor(images=image, text="Solve the math problem:", return_tensors="pt")
+
+     # Generate the solution
+     predictions = model.generate(**inputs, max_new_tokens=100)
+
+     # Decode the output
+     solution = processor.decode(predictions[0], skip_special_tokens=True)
+     return solution
+
+ # Gradio interface
  demo = gr.Interface(
+     fn=solve_math_problem,
+     inputs=gr.Image(type="pil", label="Upload Handwritten Math Problem"),
+     outputs=gr.Textbox(label="Solution"),
+     title="Handwritten Math Problem Solver",
+     description="Upload an image of a handwritten math problem, and the model will solve it.",
      examples=[
+         ["example1.jpg"],  # Add example images
+         ["example2.jpg"]
      ],
+     theme="soft"
  )

+ # Launch the app
  if __name__ == "__main__":
+     demo.launch()
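
As a quick sanity check of the new inference path outside Gradio, the snippet below loads the same checkpoint and runs a single image through it. This is a minimal sketch, not part of the commit; the file name problem.png is a hypothetical placeholder for any handwritten-problem image.

from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

# Same checkpoint as the committed app.py
model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-mathqa-base")
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-mathqa-base")

# "problem.png" is a hypothetical local image of a handwritten math problem.
image = Image.open("problem.png").convert("RGB")
inputs = processor(images=image, text="Solve the math problem:", return_tensors="pt")
prediction = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(prediction[0], skip_special_tokens=True))

Like the committed app, this loads the model once up front, so repeated calls only pay for preprocessing and generation.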