maliahson committed
Commit 24d8ae3 · verified · 1 Parent(s): b8a67e0

Update app.py

Files changed (1)
  1. app.py +101 -148
app.py CHANGED
@@ -1,47 +1,17 @@
- import sys
- import json
- from hugchat import hugchat
- from hugchat.login import Login
- import os
- import re
  import torch
  from transformers import pipeline
  import librosa
  import gradio as gr

- # Directory to store/load cookies
- cookie_path_dir = "./cookies/"
- cookie_file_path = os.path.join(cookie_path_dir, "cookies_snapshot.json")  # Default file name used by hugchat
-
- # Load pre-saved cookies
- try:
-     print("Attempting to load cookies from:", cookie_file_path)
-     if not os.path.exists(cookie_file_path):
-         # If cookies don't exist, attempt to generate them (for local testing; remove in Spaces)
-         EMAIL = os.environ.get("EMAIL", "[email protected]")  # Fallback for local testing
-         PASSWD = os.environ.get("PASSWORD", "e.AKsv$3Q4i4KcX")  # Fallback for local testing
-         os.makedirs(cookie_path_dir, exist_ok=True)
-         sign = Login(EMAIL, PASSWD)
-         cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
-         print("Generated new cookies since none were found.")
-     else:
-         # Load existing cookies
-         with open(cookie_file_path, "r") as f:
-             cookies = json.load(f)  # Load the cookie dictionary
-         print("Cookies loaded from file.")
-
-     chatbot = hugchat.ChatBot(cookies=cookies)  # Pass cookies directly
-     print("ChatBot initialized successfully.")
- except Exception as e:
-     print(f"Failed to initialize ChatBot: {str(e)}")
-     import traceback
-     traceback.print_exc()
-     sys.exit(1)
-
- # Model and device configuration for Whisper transcription
  MODEL_NAME = "openai/whisper-large-v3-turbo"
  device = 0 if torch.cuda.is_available() else "cpu"

  pipe = pipeline(
      task="automatic-speech-recognition",
      model=MODEL_NAME,
@@ -49,139 +19,122 @@ pipe = pipeline(
      device=device,
  )

  def transcribe_audio(audio_path):
      try:
-         audio, sr = librosa.load(audio_path, sr=16000, mono=True)
-         transcription = pipe(audio, batch_size=8, generate_kwargs={"language": "urdu"})["text"]
-         return transcription
-     except Exception as e:
-         return f"Error processing audio: {e}"
-
- def extract_metadata(file_name):
-     base = file_name.split(".")[0]
-     parts = base.split("_")
-     if len(parts) >= 3:
-         return {
-             "agent_username": parts[0],
-             "location": parts[-2]
-         }
-     return {"agent_username": "Unknown", "location": "Unknown"}

- def process_audio(audio, file_name):
-     urdu_text = transcribe_audio(audio)
-     if "Error" in urdu_text:
-         return json.dumps({"error": urdu_text})
-
-     metadata = extract_metadata(file_name)
-     location = metadata["location"]

-     english_text = chatbot.chat(
-         f"The following Urdu text is about crops and their diseases, but it may contain errors or misheard words due to audio transcription issues. Please use context to infer the most likely correct crop names and disease terms, and then translate the text to English:\n\n{urdu_text}"
-     ).wait_until_done()

-     extraction_prompt = f"""
-     Below is an English text about specific crops and possible diseases/pests:

-     {english_text}

-     Identify each specific Crop (like wheat, rice, cotton, etc.) mentioned and list any Diseases or Pests affecting that crop.

-     - If a disease or pest is mentioned without specifying a particular crop, list it under "No crop:".
-     - If a crop is mentioned but no diseases or pests are specified for it, include it with an empty diseases list.
-     - Do not include general terms like "crops" as a specific crop name.

-     Format your answer in this style (one entry at a time):

-     For specific crops with diseases:
-     1. CropName:
-        Diseases:
-        - DiseaseName
-        - AnotherDisease

-     For specific crops with no diseases:
-     2. NextCrop:
-        Diseases:

-     For standalone diseases:
-     3. No crop:
-        Diseases:
-        - StandaloneDisease

-     No extra text, just the structured bullet list.
-     """
-     extraction_response = chatbot.chat(extraction_prompt).wait_until_done()
-
-     lines = extraction_response.splitlines()
-     crops_and_diseases = []
-     current_crop = None
-     current_diseases = []
-
-     for line in lines:
-         line = line.strip()
-         if not line:
-             continue
-         match_crop = re.match(r'^(\d+)\.\s*(.+?):$', line)
-         if match_crop:
-             if current_crop is not None or current_diseases:
-                 crops_and_diseases.append({
-                     "crop": current_crop,
-                     "diseases": current_diseases
-                 })
-             crop_name = match_crop.group(2).strip()
-             if crop_name.lower() in ["no crop", "crops", "general crops"]:
-                 current_crop = None
-             else:
-                 current_crop = crop_name
-             current_diseases = []
-             continue
-         if line.lower().startswith("diseases:"):
-             continue
-         if line.startswith('-'):
-             disease_name = line.lstrip('-').strip()
-             if disease_name:
-                 current_diseases.append(disease_name)
-
-     if current_crop is not None or current_diseases:
-         crops_and_diseases.append({
-             "crop": current_crop,
-             "diseases": current_diseases
-         })
-
-     temp_prompt = f"Give me weather of {location} in Celsius numeric only."
-     temperature_response = chatbot.chat(temp_prompt).wait_until_done()
-
-     temperature = None
-     temp_match = re.search(r'(\d+)', temperature_response)
-     if temp_match:
-         temperature = int(temp_match.group(1))
-
-     output = {
-         "urdu_text": urdu_text,
-         "english_text": english_text,
-         "crops_and_diseases": crops_and_diseases,
-         "temperature": temperature,
-         "location": location
-     }
-
-     return json.dumps(output)
-
- with gr.Blocks(title="Audio to Crop Disease API") as interface:
-     gr.Markdown("## Upload Audio to Get Urdu Transcription, English Translation, and Crop Diseases")

      with gr.Row():
-         audio_input = gr.Audio(type="filepath", label="Upload Audio File (Urdu)")
-         file_name_input = gr.Textbox(label="File Name for Metadata (e.g., agent2_5_Multan_Pakistan.mp3)", placeholder="Enter file name")

      with gr.Row():
-         output_json = gr.JSON(label="Output (Urdu, English, Crops with Diseases)")

      process_button = gr.Button("Process Audio")

      process_button.click(
-         fn=process_audio,
-         inputs=[audio_input, file_name_input],
-         outputs=[output_json],
      )

  if __name__ == "__main__":
-     interface.launch()

  import torch
  from transformers import pipeline
  import librosa
+ from datetime import datetime
+ from deep_translator import GoogleTranslator
+ from typing import Dict, Union
+ from gliner import GLiNER
  import gradio as gr

+ # Model and device configuration for transcription
  MODEL_NAME = "openai/whisper-large-v3-turbo"
  device = 0 if torch.cuda.is_available() else "cpu"

+ # Initialize Whisper pipeline
  pipe = pipeline(
      task="automatic-speech-recognition",
      model=MODEL_NAME,
      device=device,
  )

+ # Initialize GLiNER for information extraction
+ gliner_model = GLiNER.from_pretrained("xomad/gliner-model-merge-large-v1.0").to("cpu")
+
+ def merge_entities(entities):
+     if not entities:
+         return []
+     merged = []
+     current = entities[0]
+     for next_entity in entities[1:]:
+         if next_entity['entity'] == current['entity'] and (next_entity['start'] == current['end'] + 1 or next_entity['start'] == current['end']):
+             current['word'] += ' ' + next_entity['word']
+             current['end'] = next_entity['end']
+         else:
+             merged.append(current)
+             current = next_entity
+     merged.append(current)
+     return merged
+
  def transcribe_audio(audio_path):
+     """
+     Transcribe a local audio file using the Whisper pipeline.
+     """
      try:
+         # Log start time
+         start_time = datetime.now()

+         # Ensure audio is mono and resampled to 16 kHz
+         audio, sr = librosa.load(audio_path, sr=16000, mono=True)

+         # Perform transcription
+         transcription = pipe(audio, batch_size=8, generate_kwargs={"language": "urdu"})["text"]

+         # Log end time
+         end_time = datetime.now()

+         return transcription

+     except Exception as e:
+         return f"Error processing audio: {e}"

+ def translate_text_to_english(text):
+     """
+     Translate text into English using GoogleTranslator.
+     """
+     try:
+         # Perform translation
+         translated_text = GoogleTranslator(source='auto', target='en').translate(text)
+         return translated_text
+     except Exception as e:
+         return f"Error during translation: {e}"

+ def extract_information(prompt: str, text: str, threshold: float, nested_ner: bool) -> Dict[str, Union[str, int, float]]:
+     """
+     Extract entities from the English text using the GLiNER model.
+     """
+     try:
+         text = prompt + "\n" + text
+         entities = [
+             {
+                 "entity": entity["label"],
+                 "word": entity["text"],
+                 "start": entity["start"],
+                 "end": entity["end"],
+                 "score": 0,
+             }
+             for entity in gliner_model.predict_entities(
+                 text, ["match"], flat_ner=not nested_ner, threshold=threshold
+             )
+         ]
+         merged_entities = merge_entities(entities)
+         return {"text": text, "entities": merged_entities}
+     except Exception as e:
+         return {"error": f"Information extraction failed: {e}"}

+ def pipeline_fn(audio, prompt, threshold, nested_ner):
+     """
+     Combine transcription, translation, and information extraction in a single pipeline.
+     """
+     transcription = transcribe_audio(audio)
+     if "Error" in transcription:
+         return transcription, "", {}

+     translated_text = translate_text_to_english(transcription)
+     if "Error" in translated_text:
+         return transcription, translated_text, {}

+     info_extraction = extract_information(prompt, translated_text, threshold, nested_ner)
+     return transcription, translated_text, info_extraction

+ # Gradio Interface
+ with gr.Blocks(title="Audio Processing and Information Extraction") as interface:
+     gr.Markdown("## Audio Transcription, Translation, and Information Extraction")
+
+     with gr.Row():
+         # Fixed: removed 'source' argument from gr.Audio
+         audio_input = gr.Audio(type="filepath", label="Upload Audio File")
+         prompt_input = gr.Textbox(label="Prompt for Information Extraction", placeholder="Enter your prompt here")
+
+     with gr.Row():
+         threshold_slider = gr.Slider(0, 1, value=0.3, step=0.01, label="NER Threshold")
+         nested_ner_checkbox = gr.Checkbox(label="Enable Nested NER")

      with gr.Row():
+         transcription_output = gr.Textbox(label="Transcription (Urdu)", interactive=False)
+         translation_output = gr.Textbox(label="Translation (English)", interactive=False)

      with gr.Row():
+         extraction_output = gr.HighlightedText(label="Extracted Information")

      process_button = gr.Button("Process Audio")

      process_button.click(
+         fn=pipeline_fn,
+         inputs=[audio_input, prompt_input, threshold_slider, nested_ner_checkbox],
+         outputs=[transcription_output, translation_output, extraction_output],
      )

  if __name__ == "__main__":
+     interface.launch()
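
The new merge_entities helper decides when adjacent GLiNER spans are stitched into a single entity, so it is the piece most worth a quick local sanity check. The sketch below is not part of the commit: the sample spans and offsets are hypothetical, and importing app will load the Whisper and GLiNER models as a side effect; it only illustrates the adjacency rule the function implements (same entity label and a start offset touching the previous span's end).

# Hypothetical smoke test for merge_entities (not part of the commit).
# Importing app loads the Whisper and GLiNER models as a side effect.
from app import merge_entities

# GLiNER-style spans: "cotton leaf" and "curl virus" share a label and touch,
# so they should merge; "wheat" is separate.
entities = [
    {"entity": "match", "word": "cotton leaf", "start": 0, "end": 11, "score": 0},
    {"entity": "match", "word": "curl virus", "start": 12, "end": 22, "score": 0},
    {"entity": "match", "word": "wheat", "start": 30, "end": 35, "score": 0},
]

for span in merge_entities(entities):
    print(span["entity"], "->", span["word"])
# Expected:
# match -> cotton leaf curl virus
# match -> wheat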