import sys
import json
from hugchat import hugchat
from hugchat.login import Login
import os
import re
import torch
from transformers import pipeline
import librosa
import gradio as gr
import requests # Added for debugging network
# HugChat login credentials from environment variables (secrets)
EMAIL = os.environ.get("Email")
PASSWD = os.environ.get("Password")
# Debug: confirm the credentials were read, without leaking the password itself
print("EMAIL from env:", EMAIL)
print("PASSWORD set:", PASSWD is not None)
# Directory to store cookies
cookie_path_dir = "./cookies/"
os.makedirs(cookie_path_dir, exist_ok=True)
# Test network connectivity to Hugging Face
try:
    response = requests.get("https://huggingface.co/login", timeout=10)
    print("Network test: Successfully reached https://huggingface.co/login, status code:", response.status_code)
except Exception as e:
    print("Network test failed:", str(e))
# Login to HugChat with detailed error handling
try:
    sign = Login(EMAIL, PASSWD)
    print("Attempting login with hugchat...")
    cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
    print("Login successful, cookies obtained.")
    chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
except Exception as e:
    print(f"Login failed with error: {str(e)}")
    print("Full traceback:")
    import traceback
    traceback.print_exc()
    sys.exit(1)
# Model and device configuration for Whisper transcription
MODEL_NAME = "openai/whisper-large-v3-turbo"
device = 0 if torch.cuda.is_available() else "cpu"
# Initialize Whisper pipeline
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)
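
# Note: a bare float array passed to the ASR pipeline is assumed to already be
# at the model's 16 kHz sampling rate, which is why transcribe_audio() below
# resamples with librosa before calling pipe().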

def transcribe_audio(audio_path):
    """
    Transcribe a local audio file using the Whisper pipeline.
    """
    try:
        # Resample to 16 kHz mono, the input format Whisper expects
        audio, sr = librosa.load(audio_path, sr=16000, mono=True)
        transcription = pipe(audio, batch_size=8, generate_kwargs={"language": "urdu"})["text"]
        return transcription
    except Exception as e:
        return f"Error processing audio: {e}"

def extract_metadata(file_name):
    """
    Extract the agent username and location from a file name of the form
    <agent_username>_<id>_<City>_<Country>.<ext>.
    """
    base = file_name.split(".")[0]
    parts = base.split("_")
    if len(parts) >= 3:
        return {
            "agent_username": parts[0],
            "location": parts[-2]
        }
    return {"agent_username": "Unknown", "location": "Unknown"}

def process_audio(audio, file_name):
    """
    Process the audio file and return Urdu transcription, English translation,
    and crops with their diseases.
    """
    urdu_text = transcribe_audio(audio)
    # Match the exact error prefix so a transcription containing the word
    # "Error" is not mistaken for a failure
    if urdu_text.startswith("Error processing audio"):
        return json.dumps({"error": urdu_text})
    metadata = extract_metadata(file_name)
    location = metadata["location"]
    english_text = chatbot.chat(
        f"The following Urdu text is about crops and their diseases, but it may contain errors or misheard words due to audio transcription issues. Please use context to infer the most likely correct crop names and disease terms, and then translate the text to English:\n\n{urdu_text}"
    ).wait_until_done()
extraction_prompt = f"""
Below is an English text about specific crops and possible diseases/pests:
{english_text}
Identify each specific Crop (like wheat, rice, cotton, etc.) mentioned and list any Diseases or Pests affecting that crop.
- If a disease or pest is mentioned without specifying a particular crop, list it under "No crop:".
- If a crop is mentioned but no diseases or pests are specified for it, include it with an empty diseases list.
- Do not include general terms like "crops" as a specific crop name.
Format your answer in this style (one entry at a time):
For specific crops with diseases:
1. CropName:
Diseases:
- DiseaseName
- AnotherDisease
For specific crops with no diseases:
2. NextCrop:
Diseases:
For standalone diseases:
3. No crop:
Diseases:
- StandaloneDisease
No extra text, just the structured bullet list.
"""
    extraction_response = chatbot.chat(extraction_prompt).wait_until_done()

    lines = extraction_response.splitlines()
    crops_and_diseases = []
    current_crop = None
    current_diseases = []
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # A numbered header such as "1. Wheat:" starts a new entry
        match_crop = re.match(r'^(\d+)\.\s*(.+?):$', line)
        if match_crop:
            # Flush the previous entry before starting a new one
            if current_crop is not None or current_diseases:
                crops_and_diseases.append({
                    "crop": current_crop,
                    "diseases": current_diseases
                })
            crop_name = match_crop.group(2).strip()
            if crop_name.lower() in ["no crop", "crops", "general crops"]:
                current_crop = None
            else:
                current_crop = crop_name
            current_diseases = []
            continue
        if line.lower().startswith("diseases:"):
            continue
        if line.startswith('-'):
            disease_name = line.lstrip('-').strip()
            if disease_name:
                current_diseases.append(disease_name)
    # Flush the last entry after the loop ends
    if current_crop is not None or current_diseases:
        crops_and_diseases.append({
            "crop": current_crop,
            "diseases": current_diseases
        })
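    # Example (hypothetical model reply): "1. Wheat:\n   Diseases:\n   - Rust"
    # would parse to [{"crop": "Wheat", "diseases": ["Rust"]}].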
    # Ask HugChat for the current temperature at the extracted location
    temp_prompt = f"Give me the current temperature of {location} in Celsius, numeric value only."
    temperature_response = chatbot.chat(temp_prompt).wait_until_done()
    temperature = None
    temp_match = re.search(r'(\d+)', temperature_response)
    if temp_match:
        temperature = int(temp_match.group(1))
    output = {
        "urdu_text": urdu_text,
        "english_text": english_text,
        "crops_and_diseases": crops_and_diseases,
        "temperature": temperature,
        "location": location
    }
    return json.dumps(output)
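
# Illustrative shape of the JSON string returned (field values hypothetical):
# {"urdu_text": "...", "english_text": "...",
#  "crops_and_diseases": [{"crop": "Wheat", "diseases": ["Rust"]}],
#  "temperature": 32, "location": "Multan"}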
# Gradio Interface
with gr.Blocks(title="Audio to Crop Disease API") as interface:
    gr.Markdown("## Upload Audio to Get Urdu Transcription, English Translation, and Crop Diseases")
    with gr.Row():
        audio_input = gr.Audio(type="filepath", label="Upload Audio File (Urdu)")
        file_name_input = gr.Textbox(label="File Name for Metadata (e.g., agent2_5_Multan_Pakistan.mp3)", placeholder="Enter file name")
    with gr.Row():
        output_json = gr.JSON(label="Output (Urdu, English, Crops with Diseases)")
    process_button = gr.Button("Process Audio")
    process_button.click(
        fn=process_audio,
        inputs=[audio_input, file_name_input],
        outputs=[output_json],
    )
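
# Sketch: the deployed Space could also be called programmatically via
# gradio_client (the Space id "user/testing" is a placeholder; the api_name
# matches the function name in recent Gradio versions, and newer gradio_client
# releases expect audio paths wrapped in handle_file()):
#   from gradio_client import Client
#   client = Client("user/testing")
#   result = client.predict("audio.mp3", "agent2_5_Multan_Pakistan.mp3", api_name="/process_audio")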

if __name__ == "__main__":
    interface.launch()