Raiff1982 committed on
Commit ddd25b9 · verified · 1 Parent(s): 81449e9

Update app.py

Files changed (1)
  1. app.py +21 -43
app.py CHANGED
@@ -1,26 +1,17 @@
 import os
 import asyncio
-import copy
-import inspect
-import warnings
-import json
 import logging
+from typing import Optional, List, Union, Literal
 from pathlib import Path
-from typing import Any, Literal, Optional, Union, List
-from cryptography.fernet import Fernet
 from pydantic import BaseModel, Field
-from gradio import Interface, Blocks
-from gradio.components import Component
+from gradio import Interface, Blocks, Component
 from gradio.data_classes import FileData, GradioModel, GradioRootModel
-from gradio.events import Events
-from gradio.exceptions import Error
-from gradio_client import utils as client_utils
 from transformers import pipeline
 from diffusers import DiffusionPipeline
 import torch
 import gradio as gr
 
-# Corrected code with closed parenthesis and explicit token handling
+# Load gated image model
 image_model = DiffusionPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-dev",
     torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
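Note: FLUX.1-dev is a gated repository, so this load only succeeds once the license is accepted and an access token is available to the Space. A minimal sketch of explicit token handling, assuming the token is exposed as an HF_TOKEN environment variable (the token= keyword and the variable name are assumptions, not part of this commit):

    import os
    import torch
    from diffusers import DiffusionPipeline

    image_model = DiffusionPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
        token=os.environ.get("HF_TOKEN"),  # assumed: gated-repo token passed explicitly
    )
    image_model.enable_model_cpu_offload()  # trades speed for lower GPU memory use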
@@ -28,7 +19,7 @@ image_model = DiffusionPipeline.from_pretrained(
 )
 image_model.enable_model_cpu_offload()
 
-# Define data models for Hugging Face
+# Define data models
 class FileDataDict(BaseModel):
     path: str
     url: Optional[str] = None
@@ -36,7 +27,6 @@ class FileDataDict(BaseModel):
     orig_name: Optional[str] = None
     mime_type: Optional[str] = None
     is_stream: Optional[bool] = False
-
     class Config:
         arbitrary_types_allowed = True
 
@@ -45,7 +35,6 @@ class MessageDict(BaseModel):
     role: Literal["user", "assistant", "system"]
     metadata: Optional[dict] = None
     options: Optional[List[dict]] = None
-
     class Config:
         arbitrary_types_allowed = True
 
@@ -54,7 +43,6 @@ class ChatMessage(GradioModel):
     content: Union[str, FileData, Component]
     metadata: dict = Field(default_factory=dict)
     options: Optional[List[dict]] = None
-
     class Config:
         arbitrary_types_allowed = True
 
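Note: each data model keeps Config.arbitrary_types_allowed = True. pydantic only generates validators for types it knows, so fields typed with plain classes such as gradio's FileData or Component need this flag. A standalone illustration (Opaque is a hypothetical stand-in for such a type):

    from pydantic import BaseModel

    class Opaque:  # not a pydantic model and not a builtin
        pass

    class Wrapper(BaseModel):
        payload: Opaque
        class Config:
            arbitrary_types_allowed = True  # without this, class creation raises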
@@ -65,38 +53,35 @@ class ChatbotDataMessages(GradioRootModel):
 class UniversalReasoning:
     def __init__(self, config):
         self.config = config
-        self.sentiment_analyzer = pipeline("sentiment-analysis")  # Hugging Face sentiment analysis
-        self.context_history = []  # Maintain context history
+        self.context_history = []
+        self.sentiment_analyzer = pipeline("sentiment-analysis")
 
-        # Load models with explicit truncation
         self.deepseek_model = pipeline(
-            "text-classification",
+            "text-classification",
             model="distilbert-base-uncased-finetuned-sst-2-english",
             truncation=True
-        )  # Updated model
+        )
 
         self.davinci_model = pipeline(
-            "text2text-generation",
+            "text2text-generation",
             model="t5-small",
             truncation=True
-        )  # Replacing text-davinci with T5
+        )
 
         self.additional_model = pipeline(
-            "text-generation",
+            "text-generation",
             model="EleutherAI/gpt-neo-125M",
             truncation=True
-        )  # Example GPT-Neo model
+        )
 
-        # Use earlier-defined image model
         self.image_model = image_model
 
     async def generate_response(self, question: str) -> str:
-        self.context_history.append(question)  # Add question to context history
+        self.context_history.append(question)
         sentiment_score = self.analyze_sentiment(question)
-
         deepseek_response = self.deepseek_model(question)
-        davinci_response = self.davinci_model(question, max_length=50, truncation=True)
-        additional_response = self.additional_model(question, max_length=100, truncation=True)
+        davinci_response = self.davinci_model(question, max_length=50)
+        additional_response = self.additional_model(question, max_length=100)
 
         responses = [
             f"Sentiment score: {sentiment_score}",
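Note: truncation=True now lives on the pipeline constructors, which is why the per-call truncation flags disappear from generate_response. The same pattern in isolation, using the t5-small checkpoint from this hunk (the prompt is illustrative):

    from transformers import pipeline

    gen = pipeline("text2text-generation", model="t5-small", truncation=True)
    # call sites no longer need to repeat truncation=True
    print(gen("summarize: The commit moves truncation into the constructor.", max_length=50))

One caveat: generate_response stays async even though these pipeline calls are synchronous, so the models will block the event loop while they run.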
@@ -104,7 +89,6 @@ class UniversalReasoning:
             f"T5 Response: {davinci_response}",
             f"Additional Model Response: {additional_response}"
         ]
-
         return "\n\n".join(responses)
 
     def generate_image(self, prompt: str):
@@ -115,17 +99,17 @@
             guidance_scale=3.5,
             num_inference_steps=50,
             max_sequence_length=512,
-            generator=torch.Generator("cpu").manual_seed(0)
+            generator=torch.Generator('cpu').manual_seed(0)
         ).images[0]
         image.save("flux-dev.png")
         return image
 
     def analyze_sentiment(self, text: str) -> list:
-        sentiment_score = self.sentiment_analyzer(text)  # Returns a list of dictionaries
+        sentiment_score = self.sentiment_analyzer(text)
         logging.info(f"Sentiment analysis result: {sentiment_score}")
         return sentiment_score
 
-# Main Multimodal Chatbot Component
+# Main Component
 class MultimodalChatbot(Component):
     def __init__(
         self,
@@ -134,7 +118,6 @@ class MultimodalChatbot(Component):
         render: bool = True,
         log_file: Optional[Path] = None,
     ):
-        # Ensure value is initialized as an empty list if None
         value = value or []
         super().__init__(label=label, value=value)
         self.log_file = log_file
@@ -143,20 +126,15 @@ class MultimodalChatbot(Component):
         self.universal_reasoning = UniversalReasoning({})
 
     def preprocess(self, payload: Optional[ChatbotDataMessages]) -> List[MessageDict]:
-        # Handle None payload gracefully
-        if payload is None:
-            return []
-        return payload.root
+        return payload.root if payload else []
 
     def postprocess(self, messages: Optional[List[MessageDict]]) -> ChatbotDataMessages:
-        # Ensure messages is a valid list
         messages = messages or []
        return ChatbotDataMessages(root=messages)
 
-# Hugging Face Integration Class
+# Gradio Interface
 class HuggingFaceChatbot:
     def __init__(self):
-        # Initialize MultimodalChatbot with a default empty list
         self.chatbot = MultimodalChatbot(value=[])
 
     def setup_interface(self):
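Note: the four-line None check in preprocess collapses to a single expression. pydantic models are truthy by default, so "if payload" behaves like the old "payload is None" test in practice. A quick equivalence sketch (FakePayload is a hypothetical stand-in for ChatbotDataMessages):

    class FakePayload:
        def __init__(self, root):
            self.root = root

    def preprocess(payload):
        return payload.root if payload else []

    assert preprocess(None) == []
    assert preprocess(FakePayload([{"role": "user", "content": "hi"}])) == [
        {"role": "user", "content": "hi"}
    ]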
@@ -186,7 +164,7 @@ class HuggingFaceChatbot:
         interface = self.setup_interface()
         interface.launch()
 
-# If running as standalone
+# Standalone launch
 if __name__ == "__main__":
     logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
     chatbot = HuggingFaceChatbot()
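Note: generate_response is a coroutine, so exercising it outside the Gradio interface needs an event loop, even though its body currently awaits nothing. A minimal smoke-test sketch using names from this file:

    import asyncio

    reasoner = UniversalReasoning({})
    print(asyncio.run(reasoner.generate_response("Does the update work?")))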