import gradio as gr
import os
import sys
import asyncio

sys.path.append("/home/user/app/components")
from HuggingFaceHelper import HuggingFaceHelper
from AICoreAGIX_with_TB import AICoreAGIX

# Allow TensorFlow to grow GPU memory on demand; must be set before importing TF
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"

import tensorflow as tf

# Enable GPU memory growth (if a GPU exists) so TensorFlow does not pre-allocate all VRAM
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    try:
        tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(f"[TF] GPU memory growth config error: {e}")

# Initialize AI core for TB diagnostics
ai_core = AICoreAGIX()

# Initialize Hugging Face training helper
helper = HuggingFaceHelper(model_path="Raiff1982/Codette")
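# Note: HuggingFaceHelper is a project-local component (loaded from
# /home/user/app/components). The code below only relies on it exposing a
# `dataset_path` attribute and a `fine_tune(output_dir=...)` method.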

async def diagnose_tb_async(image_file, audio_file):
    user_id = 1  # Placeholder user ID

    if image_file is None or audio_file is None:
        return "Please upload both a TB saliva image and a cough audio file."

    result = await ai_core.run_tb_diagnostics(image_file.name, audio_file.name, user_id)

    # Best-effort cleanup of the uploaded temp files
    try:
        os.remove(image_file.name)
        os.remove(audio_file.name)
    except OSError:
        pass

    return (
        f"**TB Risk Level:** {result['tb_risk']}\n\n"
        f"**Image Result:** {result['image_analysis']['result']} "
        f"(Confidence: {result['image_analysis']['confidence']:.2f})\n\n"
        f"**Audio Result:** {result['audio_analysis']['result']} "
        f"(Confidence: {result['audio_analysis']['confidence']:.2f})\n\n"
        f"**Ethical Analysis:** {result['ethical_analysis']}\n\n"
        f"**Explanation:** {result['explanation']}\n\n"
        f"**Shareable Link:** {result['shareable_link']}"
    )

def diagnose_tb(image_file, audio_file):
    # Synchronous wrapper so Gradio can call the async diagnostics routine
    return asyncio.run(diagnose_tb_async(image_file, audio_file))

def upload_and_finetune(jsonl_file):
    if jsonl_file is None:
        return "Please upload a .jsonl file to fine-tune Codriao."

    save_path = f"./training_data/{jsonl_file.name}"
    os.makedirs("training_data", exist_ok=True)

    with open(save_path, "wb") as f:
        f.write(jsonl_file.read())

    # Trigger fine-tuning
    helper.dataset_path = save_path
    helper.fine_tune(output_dir="./codette_finetuned")

    # Best-effort cleanup of the saved training file
    try:
        os.remove(save_path)
    except OSError:
        pass

    return "✅ Fine-tuning complete! Model updated and stored."

def get_latest_model():
    return "Download the latest fine-tuned Codriao model here: https://huggingface.co/Raiff1982/codriao-finetuned"

# Gradio UI
demo = gr.TabbedInterface(
    [
        gr.Interface(
            fn=diagnose_tb,
            inputs=[
                gr.File(label="Upload TB Saliva Image"),
                gr.File(label="Upload Cough Audio File (.wav)")
            ],
            outputs="text",
            title="Codriao TB Risk Analyzer",
            description="Upload a microscopy image and cough audio to analyze TB risk with compassionate AI support."
        ),
        gr.Interface(
            fn=upload_and_finetune,
            inputs=[gr.File(label="Upload JSONL Training Data")],
            outputs="text",
            title="Codriao Fine-Tuning Trainer",
            description="Upload JSONL files to teach Codriao new knowledge."
        ),
        gr.Interface(
            fn=get_latest_model,
            inputs=[],
            outputs="text",
            title="Download Codriao's Fine-Tuned Model"
        )
    ],
    title="Codriao AI System",
    description="Train Codriao, run TB diagnostics, and download updated models."
)

if __name__ == "__main__":
    demo.launch()
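
# A minimal usage sketch (assumption: run_tb_diagnostics accepts plain file paths,
# as the handler above implies) for exercising the diagnostics outside the Gradio UI:
#
#   result = asyncio.run(ai_core.run_tb_diagnostics("sample.png", "cough.wav", 1))
#   print(result["tb_risk"])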