import gradio as gr
from src.agents.text_agent import TextAgent
from src.agents.image_agent import ImageAgent
from src.agents.coordinator_agent import CoordinatorAgent
from PIL import Image
async def process_content(text: str = None, image: Image.Image = None):
    """Analyze optional text and/or image input and return a formatted report.

    Each provided input is run through its dedicated agent; a coordinator
    then merges the individual results into a combined analysis.

    Args:
        text: Optional text to classify (analyzed only if non-blank).
        image: Optional PIL image to caption and safety-check.

    Returns:
        A human-readable multi-section report string, or an error-message
        string if any processing stage raises.
    """
    text_result = None
    image_result = None

    # Analyze text only when it contains non-whitespace characters.
    if text and text.strip():
        try:
            text_agent = TextAgent(confidence_threshold=0.8)
            text_result = await text_agent.process(text)
        except Exception as e:
            return f"Error processing text: {str(e)}"

    # Analyze image only when one was actually uploaded.
    if image is not None:
        try:
            image_agent = ImageAgent(confidence_threshold=0.8)
            image_result = await image_agent.process(image)
        except Exception as e:
            return f"Error processing image: {str(e)}"

    # Robustness: nothing to coordinate if the user submitted no input at all.
    if text_result is None and image_result is None:
        return "Please enter some text and/or upload an image to analyze."

    try:
        coordinator = CoordinatorAgent()
        final_analysis = await coordinator.process({
            'text_result': text_result,
            'image_result': image_result
        })

        output_parts = []

        # Per-input sections come first, mirroring the processing order above.
        if text_result:
            output_parts.append(f"""
Text Analysis Results:
---------------------
Text: {text_result['text']}
Model Used: {text_result['model_used']}
Label: {text_result['label']}
Confidence: {text_result['confidence']:.2%}
""")
        if image_result:
            output_parts.append(f"""
Image Analysis Results:
----------------------
Caption: {image_result['caption']}
Model Used: {image_result['model_used']}
Safety: {"Safe" if image_result['is_safe'] else "Potentially Unsafe"}
Confidence: {image_result['confidence']:.2%}
""")

        # Correlation data — presumably only populated when both modalities
        # were analyzed; confirm against CoordinatorAgent.process.
        if final_analysis["correlation"]:
            output_parts.append(f"""
Correlation Analysis:
-------------------
Correlation Level: {final_analysis["correlation"]["level"]}
Correlation Score: {final_analysis["correlation"]["score"]:.2f}
""")

        # Combined analysis bullet points close out the report.
        output_parts.append("""
Combined Analysis:
----------------""")
        for analysis_point in final_analysis["analysis"]:
            output_parts.append(f"- {analysis_point}")

        return "\n".join(output_parts)
    except Exception as e:
        return f"Error in coordination: {str(e)}"
def create_interface():
    """Build and return the Gradio Blocks UI for the content analysis system."""
    with gr.Blocks() as demo:
        gr.Markdown("# Content Analysis System")
        gr.Markdown("Upload an image and/or enter text for analysis. The system will analyze both individually and provide a combined analysis.")

        with gr.Row():
            with gr.Column():
                # Text entry on the left.
                text_input = gr.Textbox(
                    label="Enter text (max 50 words)",
                    placeholder="Type your text here...",
                    max_lines=3,
                )
            with gr.Column():
                # Image upload on the right; delivered to the handler as a PIL image.
                image_input = gr.Image(type="pil", label="Upload Image")

        analyze_button = gr.Button("Analyze Content")

        # Tall textbox so the multi-section report stays readable.
        results_box = gr.Textbox(label="Analysis Results", lines=10)

        # Wire the button to the async analysis pipeline.
        analyze_button.click(
            fn=process_content,
            inputs=[text_input, image_input],
            outputs=results_box,
        )
    return demo
# Build the interface at import time so the module-level `demo` object exists
# for hosts (e.g. HuggingFace Spaces) that import this file rather than run it.
demo = create_interface()
# Launch only when executed directly as a script.
if __name__ == "__main__":
    demo.launch()