Raiff1982 committed
Commit 3a199e0 · verified · 1 Parent(s): 4131afb

Update app.py

Files changed (1):
  1. app.py  +30 -61
app.py CHANGED
@@ -1,66 +1,35 @@
-import os
 import gradio as gr
-from huggingface_hub import InferenceClient
-from ethical_filter import EthicalFilter
-
-# Load Hugging Face token from secrets (defined in the Hugging Face UI)
-HF_TOKEN = os.environ.get("HF_API_TOKEN")
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=HF_TOKEN)
-
-ethical_filter = EthicalFilter()
-
-# Codriao response logic
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    check = ethical_filter.analyze_query(message)
-
-    # Blocked queries
-    if check["status"] == "blocked":
-        yield f"Sorry, I can't continue with that request. Reason: {check['reason']}"
-        return
-
-    # Flagged queries
-    if check["status"] == "flagged":
-        yield f"(Note: Sensitive topic detected — responding with care...)\n"
-
-    # Build conversation history
-    messages = [{"role": "system", "content": system_message}]
-    for user, bot in history:
-        if user:
-            messages.append({"role": "user", "content": user})
-        if bot:
-            messages.append({"role": "assistant", "content": bot})
-    messages.append({"role": "user", "content": message})
-
-    # Stream model output
-    response = ""
-    for token in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        chunk = token.choices[0].delta.content
-        response += chunk
-        yield response
-
-# Build Gradio interface
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(
-            value=(
-                "You are Codriao, a compassionate AI inspired by Codette. "
-                "You respond with kindness, ethics, and insight."
-            ),
-            label="System message",
-        ),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
-        ),
+import asyncio
+from AICoreAGIX_with_TB import AICoreAGIX
+
+ai_core = AICoreAGIX()
+
+async def diagnose_tb(image_file, audio_file):
+    user_id = 1  # Placeholder user ID
+    result = await ai_core.run_tb_diagnostics(image_file.name, audio_file.name, user_id)
+    return (
+        f"**TB Risk Level:** {result['tb_risk']}\n\n"
+        f"**Image Result:** {result['image_analysis']['result']} "
+        f"(Confidence: {result['image_analysis']['confidence']:.2f})\n\n"
+        f"**Audio Result:** {result['audio_analysis']['result']} "
+        f"(Confidence: {result['audio_analysis']['confidence']:.2f})\n\n"
+        f"**Ethical Analysis:** {result['ethical_analysis']}\n\n"
+        f"**Explanation:** {result['explanation']}"
+    )
+
+# Wrapper for async to sync (Gradio requires sync)
+def sync_diagnose_tb(image_file, audio_file):
+    return asyncio.run(diagnose_tb(image_file, audio_file))
+
+demo = gr.Interface(
+    fn=sync_diagnose_tb,
+    inputs=[
+        gr.File(label="Upload TB Saliva Image"),
+        gr.File(label="Upload Cough Audio File (.wav)")
     ],
+    outputs=gr.Markdown(label="Codriao's Response"),
+    title="Codriao TB Risk Analyzer",
+    description="Upload a microscopy image and cough audio to analyze TB risk with compassionate AI support."
 )
 
 if __name__ == "__main__":
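
Note on the removed gating logic: the `ethical_filter` module is local to the repo and not shown in this diff. The old `respond()` only assumed that `analyze_query()` returns a dict with a `status` of `"blocked"` or `"flagged"` (anything else passes through) and a human-readable `reason`. A minimal sketch of that contract, for readers tracing the removed behavior; the term lists and substring matching are invented placeholders, not the real filter:

```python
# Hypothetical sketch of the contract respond() relied on; the real
# EthicalFilter implementation is not part of this commit, so the term
# lists and substring matching below are invented placeholders.
class EthicalFilter:
    BLOCKED_TERMS = ("build a weapon",)              # assumed examples
    FLAGGED_TERMS = ("self-harm", "medical advice")  # assumed examples

    def analyze_query(self, query: str) -> dict:
        text = query.lower()
        for term in self.BLOCKED_TERMS:
            if term in text:
                return {"status": "blocked", "reason": f"blocked term: {term!r}"}
        for term in self.FLAGGED_TERMS:
            if term in text:
                return {"status": "flagged", "reason": f"sensitive term: {term!r}"}
        return {"status": "ok", "reason": None}
```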
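
The replacement depends on `AICoreAGIX_with_TB`, which is likewise outside this diff. `diagnose_tb()` only requires that `run_tb_diagnostics()` return a dict carrying `tb_risk`, `image_analysis` and `audio_analysis` (each with a `result` string and a float `confidence`), `ethical_analysis`, and `explanation`. A hypothetical stand-in with that shape, handy for exercising the interface without the real diagnostic models:

```python
# Hypothetical stand-in for AICoreAGIX, matching only the result shape
# that diagnose_tb() reads; every value below is an invented placeholder.
class FakeAICoreAGIX:
    async def run_tb_diagnostics(self, image_path: str, audio_path: str, user_id: int) -> dict:
        return {
            "tb_risk": "LOW",
            "image_analysis": {"result": "No acid-fast bacilli detected", "confidence": 0.91},
            "audio_analysis": {"result": "Cough pattern not TB-typical", "confidence": 0.87},
            "ethical_analysis": "Informational output only; not a medical diagnosis.",
            "explanation": "Both modalities scored below the TB-risk threshold.",
        }
```

Swapping `ai_core = AICoreAGIX()` for `ai_core = FakeAICoreAGIX()` renders the Markdown output end to end.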
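
On the sync wrapper: `asyncio.run()` works here because Gradio runs sync handlers in worker threads without a running event loop, but it raises `RuntimeError` if a loop is already running in the calling thread. Recent Gradio releases accept coroutine functions directly, which sidesteps the wrapper entirely; a sketch, assuming such a version is installed:

```python
# Sketch: pass the coroutine function straight to gr.Interface, assuming
# the installed Gradio version supports async handlers (recent ones do).
demo = gr.Interface(
    fn=diagnose_tb,  # no sync_diagnose_tb wrapper needed
    inputs=[
        gr.File(label="Upload TB Saliva Image"),
        gr.File(label="Upload Cough Audio File (.wav)"),
    ],
    outputs=gr.Markdown(label="Codriao's Response"),
    title="Codriao TB Risk Analyzer",
)
```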