Aishwarya Solanki committed on
Commit dfd4731 · 1 Parent(s): 3fee699

Refining UI

Files changed (1)
  1. app.py +98 -28
app.py CHANGED

@@ -3,14 +3,56 @@ import requests
 import google.generativeai as genai
 import openai
 from collections import Counter
-
+from huggingface_hub import InferenceClient
+
+def api_check_msg(api_key, selected_model):
+    res = validate_api_key(api_key, selected_model)
+    return res["message"]
+
+def validate_api_key(api_key, selected_model):
+    # Check if the API key is valid for GPT-3.5-Turbo
+    if "GPT" in selected_model:
+        url = "https://api.openai.com/v1/models"
+        headers = {
+            "Authorization": f"Bearer {api_key}"
+        }
+        try:
+            response = requests.get(url, headers=headers)
+            if response.status_code == 200:
+                return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
+            else:
+                return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Status code: {response.status_code}</p>'}
+        except requests.exceptions.RequestException as e:
+            return {"is_valid": False, "message": f'<p style="color: red;">Invalid OpenAI API Key. Error: {e}</p>'}
+    elif "Llama" in selected_model:
+        url = "https://huggingface.co/api/whoami-v2"
+        headers = {
+            "Authorization": f"Bearer {api_key}"
+        }
+        try:
+            response = requests.get(url, headers=headers)
+            if response.status_code == 200:
+                return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
+            else:
+                return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Status code: {response.status_code}</p>'}
+        except requests.exceptions.RequestException as e:
+            return {"is_valid": False, "message": f'<p style="color: red;">Invalid Hugging Face API Key. Error: {e}</p>'}
+    elif "Gemini" in selected_model:
+        try:
+            genai.configure(api_key=api_key)
+            model = genai.GenerativeModel("gemini-1.5-flash")
+            response = model.generate_content("Help me diagnose the patient.")
+            return {"is_valid": True, "message": '<p style="color: green;">API Key is valid!</p>'}
+        except Exception as e:
+            return {"is_valid": False, "message": f'<p style="color: red;">Invalid Google API Key. Error: {e}</p>'}
 
 def generate_text_chatgpt(key, prompt, temperature, top_p):
+
     openai.api_key = key
 
     response = openai.chat.completions.create(
-        model="gpt-4-0613",
-        messages=[{"role": "system", "content": "Suppose that you are a talented diagnostician"},
+        model="gpt-3.5-turbo-1106",
+        messages=[{"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
                   {"role": "user", "content": prompt}],
         temperature=temperature,
         max_tokens=50,
@@ -35,28 +77,24 @@ def generate_text_gemini(key, prompt, temperature, top_p):
 
 
 def generate_text_llama(key, prompt, temperature, top_p):
-    model_name = "meta-llama/Llama-3.1-8B-Instruct"
-
-    API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
-    headers = {"Authorization": f"Bearer {key}"}
-    payload = {
-        "inputs": prompt,
-        "parameters": {
-            "temperature": temperature,
-            "max_new_tokens": 50,
-            "top_p": top_p,
-        }
-    }
-    response = requests.post(API_URL, headers=headers, json=payload)
-    resp_obj = response.json()
-    if isinstance(resp_obj, list):
-        resp = resp_obj[0]
-        if 'generated_text' in resp:
-            if len(resp['generated_text']) > len(prompt):
-                return resp['generated_text'][len(prompt):]
-            return resp['generated_text']
-        return resp
-    return resp_obj
+    model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
+    client = InferenceClient(api_key=key)
+
+    messages = [{"role": "system", "content": "You are a talented diagnostician who is diagnosing a patient."},
+                {"role": "user","content": prompt}]
+
+    completion = client.chat.completions.create(
+        model=model_name,
+        messages=messages,
+        max_tokens=len(prompt)+50,
+        temperature=temperature,
+        top_p=top_p
+    )
+
+    response = completion.choices[0].message.content
+    if len(response) > len(prompt):
+        return response[len(prompt):]
+    return response
 
 
 def diagnose(key, model, top_k, temperature, symptom_prompt):
@@ -76,6 +114,28 @@ def diagnose(key, model, top_k, temperature, symptom_prompt):
 
     return majority_output, confidence
 
+def update_model_components(selected_model):
+    model_map = {
+        "GPT-3.5-Turbo": "GPT",
+        "Llama-3": "Llama",
+        "Gemini-1.5": "Gemini"
+    }
+
+    link_map = {
+        "GPT-3.5-Turbo": "https://platform.openai.com/account/api-keys",
+        "Llama-3": "https://hf.co/settings/tokens",
+        "Gemini-1.5": "https://aistudio.google.com/apikey"
+    }
+    textbox_label = f"Please input the API key for your {model_map[selected_model]} model"
+    button_value = f"Don't have an API key? Get one for the {model_map[selected_model]} model here."
+    button_link = link_map[selected_model]
+    return gr.update(label=textbox_label), gr.update(value=button_value, link=button_link)
+
+def toggle_button(symptoms_text, gpt_key, llama_key, gemini_key):
+    if symptoms_text.strip() and validate_api_key(gpt_key, "GPT") and \
+       validate_api_key(llama_key, "Llama") and validate_api_key(gemini_key, "Gemini"):
+        return gr.update(interactive=True)
+    return gr.update(interactive=False)
 
 
 with gr.Blocks() as ui:
@@ -86,6 +146,15 @@ with gr.Blocks() as ui:
         llama_key = gr.Textbox(label="Please input your Llama key", type="password")
         gemini_key = gr.Textbox(label="Please input your Gemini key", type="password")
 
+        status_message = gr.HTML(label="Validation Status")
+        gpt_key.input(fn=api_check_msg, inputs=[gpt_key, "GPT"], outputs=status_message)
+
+        status_message = gr.HTML(label="Validation Status")
+        llama_key.input(fn=api_check_msg, inputs=[llama_key, "Llama"], outputs=status_message)
+
+        status_message = gr.HTML(label="Validation Status")
+        gemini_key.input(fn=api_check_msg, inputs=[gemini_key, "Gemini"], outputs=status_message)
+
         gr.Button(value="Don't have an LLM key? Get one through the below links.")
         gr.Button(value="OpenAi Key", link="https://platform.openai.com/account/api-keys")
         gr.Button(value="Meta Llama Key", link="https://platform.openai.com/account/api-keys")
@@ -95,12 +164,13 @@ with gr.Blocks() as ui:
     with gr.Column(scale=2, min_width=600):
        gr.Markdown("### Hello, Welcome to the GUI by Team #9.")
        temperature = gr.Slider(0.0, 1.0, value=0.7, step = 0.01, label="Temperature", info="Set the Temperature")
-       top_k = gr.Slider(1, 10, value=3, step = 1, label="top-k value", info="Set the 'k' for top-k LLM responses")
+       top_p = gr.Slider(1, 10, value=3, step = 1, label="top-p value", info="Set the sampling nucleus parameter")
        symptoms = gr.Textbox(label="Add the symptom data in the input to receive diagnosis")
 
-       llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose")
+       llm_btn = gr.Button(value="Diagnose Disease", variant="primary", elem_id="diagnose", interactive=False)
+       symptoms.input(toggle_button, inputs=[symptoms, gpt_key, llama_key, gemini_key], outputs=llm_btn)
        output = gr.Textbox(label="LLM output with majority vote and confidence", interactive=False, placeholder="Output will appear here...")
-       llm_btn.click(fn=diagnose, inputs=[gpt_key, llama_key, gemini_key, top_k, temperature, symptoms], outputs=output, api_name="auditor")
+       llm_btn.click(fn=diagnose, inputs=[gpt_key, llama_key, gemini_key, top_p, temperature, symptoms], outputs=output, api_name="LLM_Comparator")
 
 
 ui.launch(share=True)
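
A minimal sketch of how the key-validation helpers added in this commit could be exercised outside the Gradio UI. It assumes validate_api_key and api_check_msg have been factored into a hypothetical module named key_checks.py (as committed, importing app.py would launch the UI), and the keys below are placeholders rather than real credentials:

# Sketch only: key_checks.py is a hypothetical module holding the two helpers from this commit;
# the key strings are placeholders, not real credentials.
from key_checks import validate_api_key, api_check_msg

for provider, key in [("GPT", "sk-PLACEHOLDER"),
                      ("Llama", "hf_PLACEHOLDER"),
                      ("Gemini", "PLACEHOLDER")]:
    result = validate_api_key(key, provider)   # returns {"is_valid": bool, "message": "<p ...>...</p>"}
    print(provider, result["is_valid"])
    print(api_check_msg(key, provider))        # the HTML string shown in the Validation Status box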
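One wiring detail: Gradio event listeners expect components in inputs, while the committed lines pass the literal strings "GPT", "Llama", and "Gemini" alongside each textbox. Below is a hedged, standalone sketch (not part of this commit) of one way to bind the provider name instead, using functools.partial; api_check_msg here is a stand-in for the helper above, and the same pattern would apply to the Llama and Gemini textboxes:

# Hypothetical variant, not part of the commit: bind the provider name with functools.partial
# so the .input event only passes the textbox value (the key) to the handler.
import gradio as gr
from functools import partial

def api_check_msg(api_key, selected_model):
    # stand-in for the helper added in this commit; returns the HTML status string
    return f"<p>pretending to validate a {selected_model} key of length {len(api_key)}</p>"

with gr.Blocks() as demo:
    gpt_key = gr.Textbox(label="Please input your GPT key", type="password")
    status_message = gr.HTML(label="Validation Status")
    gpt_key.input(fn=partial(api_check_msg, selected_model="GPT"),
                  inputs=gpt_key, outputs=status_message)

demo.launch()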