Raiff1982 committed (verified)
Commit 40c1d16 · 1 Parent(s): cf44b00

Update app.py

Files changed (1): app.py  +66 −55
app.py CHANGED
@@ -1,63 +1,74 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-from AICoreAGIX_with_TB import AICoreAGIX  # Ensure this imports your AICoreAGIX class
+import os
+from HuggingFaceHelper import HuggingFaceHelper
+from AICoreAGIX_with_TB import AICoreAGIX

-# Initialize the AI core
+# Initialize AI Core for TB analysis
 ai_core = AICoreAGIX()

-def respond(message, history, system_message, max_tokens, temperature, top_p, image, audio):
-    # Process the uploaded files
-    if image and audio:
-        # Save the uploaded files to disk or process them as needed
-        image_path = "uploaded_image.png"
-        audio_path = "uploaded_audio.wav"
-        image.save(image_path)
-        audio.save(audio_path)
-
-        # Run TB diagnostics
-        tb_result = ai_core.run_tb_diagnostics(image_path, audio_path, user_id=1)  # Replace with actual user_id handling
-
-        # Incorporate TB diagnostic results into the response
-        tb_message = f"TB Diagnostic Result: {tb_result['tb_risk']}\n"
-        tb_message += f"Image Analysis: {tb_result['image_analysis']}\n"
-        tb_message += f"Audio Analysis: {tb_result['audio_analysis']}\n"
-        tb_message += f"Shareable Link: {tb_result['shareable_link']}\n\n"
-    else:
-        tb_message = "No TB diagnostic data provided.\n\n"
-
-    # Existing chat functionality
-    messages = [{"role": "system", "content": system_message}]
-    for user_msg, bot_msg in history:
-        if user_msg:
-            messages.append({"role": "user", "content": user_msg})
-        if bot_msg:
-            messages.append({"role": "assistant", "content": bot_msg})
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield tb_message + response
-
-# Define the Gradio interface
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-        gr.inputs.Image(type="pil", label="Upload Saliva Microscopy Image"),
-        gr.inputs.Audio(type="file", label="Upload Cough Audio Recording"),
+# Initialize Hugging Face training helper
+helper = HuggingFaceHelper(model_path="./merged_model")
+
+def diagnose_tb(image_file, audio_file):
+    user_id = 1  # Placeholder user ID
+    result = ai_core.run_tb_diagnostics(image_file.name, audio_file.name, user_id)
+    return (
+        f"**TB Risk Level:** {result['tb_risk']}\n\n"
+        f"**Image Result:** {result['image_analysis']['result']} "
+        f"(Confidence: {result['image_analysis']['confidence']:.2f})\n\n"
+        f"**Audio Result:** {result['audio_analysis']['result']} "
+        f"(Confidence: {result['audio_analysis']['confidence']:.2f})\n\n"
+        f"**Ethical Analysis:** {result['ethical_analysis']}\n\n"
+        f"**Explanation:** {result['explanation']}\n\n"
+        f"**Shareable Link:** {result['shareable_link']}"
+    )
+
+def upload_and_finetune(jsonl_file):
+    save_path = f"./training_data/{jsonl_file.name}"
+    os.makedirs("training_data", exist_ok=True)
+
+    with open(save_path, "wb") as f:
+        f.write(jsonl_file.read())
+
+    # Trigger fine-tuning
+    helper.dataset_path = save_path
+    helper.fine_tune(output_dir="./codette_finetuned")
+
+    return f"✅ Fine-tuning complete! Model updated and stored."
+
+def get_latest_model():
+    return "Download the latest fine-tuned Codriao model here: https://huggingface.co/Raiff1982/codriao-finetuned"
+
+# Gradio UI
+demo = gr.TabbedInterface(
+    [
+        gr.Interface(
+            fn=diagnose_tb,
+            inputs=[
+                gr.File(label="Upload TB Saliva Image"),
+                gr.File(label="Upload Cough Audio File (.wav)")
+            ],
+            outputs="text",
+            title="Codriao TB Risk Analyzer",
+            description="Upload a microscopy image and cough audio to analyze TB risk with compassionate AI support."
+        ),
+        gr.Interface(
+            fn=upload_and_finetune,
+            inputs=[gr.File(label="Upload JSONL Training Data")],
+            outputs="text",
+            title="Codriao Fine-Tuning Trainer",
+            description="Upload JSONL files to teach Codriao new knowledge."
+        ),
+        gr.Interface(
+            fn=get_latest_model,
+            inputs=[],
+            outputs="text",
+            title="Download Codriao's Fine-Tuned Model"
+        )
     ],
+    title="Codriao AI System",
+    description="Train Codriao, run TB diagnostics, and download updated models."
 )

 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
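
The new diagnose_tb handler assumes that ai_core.run_tb_diagnostics returns a nested dictionary with tb_risk, image_analysis and audio_analysis sub-dicts carrying result and confidence fields, plus ethical_analysis, explanation, and shareable_link. A minimal sketch of that assumed shape, with placeholder values used purely for illustration:

```python
# Assumed return shape of AICoreAGIX.run_tb_diagnostics, inferred only from the
# keys accessed in diagnose_tb. All values below are placeholders, not real output.
example_result = {
    "tb_risk": "Low",
    "image_analysis": {"result": "No TB indicators detected", "confidence": 0.91},
    "audio_analysis": {"result": "Cough pattern within normal range", "confidence": 0.87},
    "ethical_analysis": "Result should be confirmed by a clinician.",
    "explanation": "Both modalities scored below the configured risk threshold.",
    "shareable_link": "https://example.org/reports/placeholder",
}
```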
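
HuggingFaceHelper itself is not part of this commit; based only on how app.py uses it (a model_path constructor argument, a writable dataset_path attribute, and a fine_tune(output_dir=...) method), its interface would need to look roughly like the stub below. This is an interface sketch under those assumptions, not the real implementation in HuggingFaceHelper.py:

```python
# Interface stub for HuggingFaceHelper, inferred solely from the calls made in app.py.
class HuggingFaceHelper:
    def __init__(self, model_path, dataset_path=None):
        self.model_path = model_path      # e.g. "./merged_model"
        self.dataset_path = dataset_path  # set by upload_and_finetune before training

    def fine_tune(self, output_dir):
        # Placeholder: the real helper would load the model at self.model_path,
        # train on self.dataset_path, and write the result to output_dir.
        raise NotImplementedError("Stub for illustration; see HuggingFaceHelper.py")
```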