import os
import re
import sys
import json

import torch
import librosa
import gradio as gr
from transformers import pipeline
from hugchat import hugchat
from hugchat.login import Login

# Directory to store/load cookies
cookie_path_dir = "./cookies/"
os.makedirs(cookie_path_dir, exist_ok=True)

# Load pre-saved cookies instead of logging in
try:
    print("Attempting to load cookies from:", cookie_path_dir)
    chatbot = hugchat.ChatBot(cookie_path_dir=cookie_path_dir)
    print("Cookies loaded successfully.")
except Exception as e:
    print(f"Failed to load cookies: {str(e)}")
    sys.exit(1)

# Model and device configuration for Whisper transcription
MODEL_NAME = "openai/whisper-large-v3-turbo"
# Use the first CUDA GPU if one is available, otherwise run on CPU
device = 0 if torch.cuda.is_available() else "cpu"

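# Whisper ASR pipeline; chunk_length_s=30 splits long recordings into 30-second segments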
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

def transcribe_audio(audio_path):
    """Transcribe an Urdu audio file to text with the Whisper pipeline."""
    try:
        # Resample to 16 kHz mono, the sample rate Whisper expects
        audio, _ = librosa.load(audio_path, sr=16000, mono=True)
        transcription = pipe(audio, batch_size=8, generate_kwargs={"language": "urdu"})["text"]
        return transcription
    except Exception as e:
        return f"Error processing audio: {e}"

def extract_metadata(file_name):
    """Pull the agent username and location out of a file name such as
    'agent2_5_Multan_Pakistan.mp3' (username is the first underscore-separated
    token, location the second-to-last)."""
    base = file_name.split(".")[0]
    parts = base.split("_")
    if len(parts) >= 3:
        return {
            "agent_username": parts[0],
            "location": parts[-2]
        }
    return {"agent_username": "Unknown", "location": "Unknown"}

def process_audio(audio, file_name):
    """Transcribe the audio, translate it to English, extract crops and their
    diseases, look up the local temperature, and return everything as a JSON string."""
    urdu_text = transcribe_audio(audio)
    if "Error" in urdu_text:
        return json.dumps({"error": urdu_text})

    metadata = extract_metadata(file_name)
    location = metadata["location"]

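    # Use HuggingChat to correct likely mishearings and translate the Urdu transcription to English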
    english_text = chatbot.chat(
        f"The following Urdu text is about crops and their diseases, but it may contain errors or misheard words due to audio transcription issues. Please use context to infer the most likely correct crop names and disease terms, and then translate the text to English:\n\n{urdu_text}"
    ).wait_until_done()

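    # Prompt the assistant to list each crop and its diseases/pests in a fixed bullet format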
    extraction_prompt = f"""
    Below is an English text about specific crops and possible diseases/pests:

    {english_text}

    Identify each specific Crop (like wheat, rice, cotton, etc.) mentioned and list any Diseases or Pests affecting that crop.

    - If a disease or pest is mentioned without specifying a particular crop, list it under "No crop:".
    - If a crop is mentioned but no diseases or pests are specified for it, include it with an empty diseases list.
    - Do not include general terms like "crops" as a specific crop name.

    Format your answer in this style (one entry at a time):

    For specific crops with diseases:
    1. CropName:
    Diseases:
    - DiseaseName
    - AnotherDisease

    For specific crops with no diseases:
    2. NextCrop:
    Diseases:

    For standalone diseases:
    3. No crop:
    Diseases:
    - StandaloneDisease

    No extra text, just the structured bullet list.
    """
    extraction_response = chatbot.chat(extraction_prompt).wait_until_done()

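    # Parse the structured bullet list into {"crop": ..., "diseases": [...]} entries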
    lines = extraction_response.splitlines()
    crops_and_diseases = []
    current_crop = None
    current_diseases = []

    for line in lines:
        line = line.strip()
        if not line:
            continue
        match_crop = re.match(r'^(\d+)\.\s*(.+?):$', line)
        if match_crop:
            if current_crop is not None or current_diseases:
                crops_and_diseases.append({
                    "crop": current_crop,
                    "diseases": current_diseases
                })
            crop_name = match_crop.group(2).strip()
            if crop_name.lower() in ["no crop", "crops", "general crops"]:
                current_crop = None
            else:
                current_crop = crop_name
            current_diseases = []
            continue
        if line.lower().startswith("diseases:"):
            continue
        if line.startswith('-'):
            disease_name = line.lstrip('-').strip()
            if disease_name:
                current_diseases.append(disease_name)

    if current_crop is not None or current_diseases:
        crops_and_diseases.append({
            "crop": current_crop,
            "diseases": current_diseases
        })

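    # Weather lookup: ask the assistant for the temperature at the agent's location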
    temp_prompt = f"Give me the current temperature in {location} in Celsius, as a number only."
    temperature_response = chatbot.chat(temp_prompt).wait_until_done()

    # Pull the first (possibly negative) integer out of the reply
    temperature = None
    temp_match = re.search(r'(-?\d+)', temperature_response)
    if temp_match:
        temperature = int(temp_match.group(1))

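    # Assemble the final JSON payload returned to the Gradio client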
    output = {
        "urdu_text": urdu_text,
        "english_text": english_text,
        "crops_and_diseases": crops_and_diseases,
        "temperature": temperature,
        "location": location
    }

    return json.dumps(output)

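# Gradio UI: upload an Urdu audio file and its metadata-bearing file name, get JSON back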
with gr.Blocks(title="Audio to Crop Disease API") as interface:
    gr.Markdown("## Upload Audio to Get Urdu Transcription, English Translation, and Crop Diseases")
    
    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="Upload Audio File (Urdu)")
        file_name_input = gr.Textbox(label="File Name for Metadata (e.g., agent2_5_Multan_Pakistan.mp3)", placeholder="Enter file name")
    
    with gr.Row():
        output_json = gr.JSON(label="Output (Urdu, English, Crops with Diseases)")

    process_button = gr.Button("Process Audio")

    process_button.click(
        fn=process_audio,
        inputs=[audio_input, file_name_input],
        outputs=[output_json],
    )

if __name__ == "__main__":
    interface.launch()