import gradio as gr
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import (
    SystemMessage,
    UserMessage,
    TextContentItem,
    ImageContentItem,
    ImageUrl,
    ImageDetailLevel,
)
from azure.core.credentials import AzureKeyCredential
from gtts import gTTS
from deep_translator import GoogleTranslator
import os
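
# Configuration: the app expects the Azure credentials as environment variables
# (set AZURE_API_KEY and AZURE_ENDPOINT as Hugging Face Space secrets, or export
# them in a local shell), e.g.:
#   export AZURE_API_KEY="<your key>"
#   export AZURE_ENDPOINT="https://models.inference.ai.azure.com"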

# βœ… Azure API credentials, loaded securely from the environment
token = os.getenv("AZURE_API_KEY")
endpoint = os.getenv("AZURE_ENDPOINT", "https://models.inference.ai.azure.com")
model_name = os.getenv("MODEL_NAME", "gpt-4o")  # optional secret (variable name assumed); defaults to gpt-4o

# βœ… Validate credentials
if not (isinstance(token, str) and token.strip()) or not (isinstance(endpoint, str) and endpoint.strip()):
    raise ValueError("Azure API credentials are missing. Please set AZURE_API_KEY and AZURE_ENDPOINT in Hugging Face secrets.")

# βœ… Azure Client
client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(token),
)

# πŸ” Analyze disease
def analyze_leaf_disease(image_path, leaf_type):
    try:
        response = client.complete(
            messages=[
                SystemMessage(
                    content=f"You are a subject matter expert that describes leaf disease in detail for {leaf_type} leaves."
                ),
                UserMessage(
                    content=[
                        TextContentItem(text="What is the name of the leaf disease in this image, and what is the confidence score? Explain the probable cause, and briefly list the medicines or steps to prevent the disease."),
                        ImageContentItem(
                            image_url=ImageUrl.load(
                                image_file=image_path,
                                image_format="jpg",
                                detail=ImageDetailLevel.LOW,
                            )
                        ),
                    ],
                ),
            ],
            model=model_name,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"❌ Error: {e}"

# 🌐 Translate to Bangla
def translate_to_bangla(text):
    try:
        return GoogleTranslator(source="auto", target="bn").translate(text)
    except Exception as e:
        return f"❌ Translation error: {e}"

# πŸ”Š Text to Speech
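# gTTS synthesizes the result text (English voice by default) and saves it as an
# MP3 file that the gr.Audio component plays back.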
def text_to_speech(text):
    try:
        tts = gTTS(text)
        audio_file = "tts_output.mp3"
        tts.save(audio_file)
        return audio_file
    except Exception as e:
        print(f"❌ TTS error: {e}")
        return None  # gr.Audio expects a file path (or None), not an error string

# πŸš€ Main Action
def handle_proceed(image_path, leaf_type):
    return "", analyze_leaf_disease(image_path, leaf_type)

# 🌿 Gradio App
with gr.Blocks() as interface:
    gr.Markdown("# πŸƒ Leaf Disease Detector\nUpload an image, select the leaf type, and analyze the disease. Listen or translate the result.")

    with gr.Row():
        image_input = gr.Image(type="filepath", label="πŸ“Έ Upload Leaf Image")
        leaf_type = gr.Dropdown(
            choices=["Tomato", "Tobacco", "Corn", "Paddy", "Maize", "Potato", "Wheat"],
            label="🌿 Select Leaf Type",
        )
        proceed_button = gr.Button("πŸ” Analyze")

    with gr.Row():
        detecting_label = gr.Label("Detecting...", visible=False)
        output_box = gr.Textbox(label="πŸ“‹ Result", placeholder="Analysis will appear here", lines=10)

    with gr.Row():
        tts_button = gr.Button("πŸ”Š Read Aloud")
        translate_button = gr.Button("🌐 Translate to Bangla")

    with gr.Row():
        tts_audio = gr.Audio(label="🎧 Audio", autoplay=True)
        translated_output = gr.Textbox(label="πŸ“˜ Bangla Translation", placeholder="Translation will appear here", lines=10)
    # Button logic
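    # handle_proceed returns two values: an empty string for the status label and the
    # analysis text for the result box; the TTS and translation buttons both read from output_box.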
    proceed_button.click(handle_proceed, inputs=[image_input, leaf_type], outputs=[detecting_label, output_box])
    tts_button.click(text_to_speech, inputs=[output_box], outputs=[tts_audio])
    translate_button.click(translate_to_bangla, inputs=[output_box], outputs=[translated_output])

if __name__ == "__main__":
    interface.launch()