Update app.py
app.py
CHANGED
@@ -11,7 +11,6 @@ import os
 import requests
 import pickle
 import numpy as np
-import google.generativeai as genai
 
 # Load model once
 with open("best_clf.pkl", "rb") as file:
@@ -24,11 +23,73 @@ try:
     load_dotenv()
 except:
     pass
-GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")
-
-genai.configure(api_key=GOOGLE_API_KEY)
 
 # Get the token from environment variables
+HF_TOKEN = os.getenv("HF_TOKEN")
+
+
+def query_huggingface_model(selected_model: dict, input_data, input_type="text", max_tokens=512, task="text-classification", temperature=0.7, top_p=0.9):
+    API_URL = selected_model.get("url")
+    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+
+    try:
+        if input_type == "text":
+            if task == "text-generation":
+                payload = {
+                    "messages": [
+                        {
+                            "role": "user",
+                            "content": input_data
+                        }
+                    ],
+                    "model": selected_model.get("model")
+                }
+
+            else:
+                payload = {
+                    "inputs": input_data,
+
+                }
+            response = requests.post(API_URL, headers=headers, json=payload)
+
+        elif input_type == "image":
+            with open(input_data, "rb") as f:
+                data = f.read()
+            response = requests.post(API_URL, headers=headers, data=data)
+
+        else:
+            return {"error": f"Unsupported input_type: {input_type}"}
+
+        response.raise_for_status()
+        return response.json()
+
+    except requests.exceptions.RequestException as e:
+        return {"error": str(e)}
+
+def extract_response_content(response):
+    print(f"Response is: {response}")
+
+    # For text generation or image captioning
+    if isinstance(response, list):
+        if response and isinstance(response[0], dict) and "generated_text" in response[0]:
+            return response[0]["generated_text"]
+
+        elif response and isinstance(response[0], list) and "label" in response[0][0]:
+            # For text classification
+            return [(item["label"], round(item["score"], 3)) for item in response[0]]
+
+    # For OpenAI-style chat responses
+    elif isinstance(response, dict):
+        if "choices" in response and isinstance(response["choices"], list):
+            try:
+                return response["choices"][0]["message"]["content"]
+            except (KeyError, IndexError, TypeError):
+                return "Error: Could not extract message from choices"
+
+        elif "error" in response:
+            return f"Error: {response['error']}"
+
+    return "Unknown response format"
+
 # --- Step 1 ---
 if 'name' not in st.session_state:
     st.session_state.name = "Ange"
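For context, a minimal sketch of how the two new helpers are meant to be called together. The router URL and model name are the same placeholders used in the commit, the prompt string here is made up, and HF_TOKEN must be set in the environment:

# Hypothetical standalone usage of the helpers added above
selected_model = {
    "url": "https://router.huggingface.co/nebius/v1/chat/completions",
    "model": "deepseek-ai/DeepSeek-V3",
}

# query_huggingface_model returns parsed JSON on success, {"error": ...} on failure
raw = query_huggingface_model(
    selected_model,
    "Explain what a glucose level of 120 mg/dL means.",
    input_type="text",
    task="text-generation",
)
reply = extract_response_content(raw)  # plain string for chat-style responses
print(reply)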
@@ -1030,10 +1091,17 @@ with st.container(key = "main"):
             st.rerun()
 
     elif st.session_state.form5 == "next" :
-
-
-
-
+        def generate_stream_response(text):
+            # Yield the string one character at a time (for streaming)
+            for char in text:
+                yield char
+                time.sleep(0.02)
+        selected_model = {
+            "url": "https://router.huggingface.co/nebius/v1/chat/completions",  # Replace with the Hugging Face API URL for your model
+            "model": "deepseek-ai/DeepSeek-V3"  # Replace with the model name
+        }
+        task = "text-generation"
+        prompt = f"""
         Hi! A person named {st.session_state.name} has just been assessed for heart disease risk.
 
         📊 **Prediction**: {"High Risk" if st.session_state.Risk == 1 else "Low Risk"}
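As an aside, the character-streaming pattern added here can be exercised on its own. This sketch assumes a Streamlit version that provides st.write_stream and that time is imported at the top of app.py (neither is shown in this diff):

import time
import streamlit as st

def stream_chars(text):
    # Yield one character at a time; the short sleep gives a typing effect
    for char in text:
        yield char
        time.sleep(0.02)

st.write_stream(stream_chars("Your results look encouraging!"))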
@@ -1054,7 +1122,13 @@ with st.container(key = "main"):
         - Glucose: {st.session_state.glucose} mg/dL
 
         💬 Please give a personalized, kind, and easy-to-understand explanation of this result. Include practical lifestyle advice and possible early warning signs to watch out for. Use an encouraging, empathetic tone.
-        """
+        """
+
+        with st.container(key = "expert"):
+            with st.spinner("Model is Analysing your Results..."):
+                result = query_huggingface_model(selected_model, prompt, input_type="text", task=task)
+                response = extract_response_content(result)
+
         st.markdown(f"""
         <div style="
         font-size: 18px;
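For reference, extract_response_content (added in the second hunk) accepts either an OpenAI-style chat completion dict or a nested classification list; a sketch with made-up values:

# Hypothetical response shapes, for illustration only
chat_style = {"choices": [{"message": {"content": "You are at low risk."}}]}
classification_style = [[{"label": "LOW_RISK", "score": 0.9231}]]

print(extract_response_content(chat_style))            # -> "You are at low risk."
print(extract_response_content(classification_style))  # -> [("LOW_RISK", 0.923)]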
@@ -1077,7 +1151,7 @@ with st.container(key = "main"):
         </div>
         """, unsafe_allow_html=True)
 
-        st.
+        st.write_stream(generate_stream_response(response))  # This will stream the text one character at a time
 
 
 
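One behavior worth noting in the new flow: on a network failure query_huggingface_model returns {"error": ...}, extract_response_content converts that into an "Error: ..." string, and st.write_stream will then type the error message out character by character. A guard like this sketch (hypothetical, not part of the commit) would surface errors separately:

response = extract_response_content(result)
if isinstance(response, str) and response.startswith("Error:"):
    st.error(response)  # show an error box instead of streaming the message
else:
    st.write_stream(generate_stream_response(response))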