# AI_Doctor_Bot / app.py
# (Hugging Face Spaces page residue, kept as a comment: "dbis's picture",
#  commit "Create app.py", bfd619b)
# Standard library
import os
import time

# Third-party
import pandas as pd
import numpy as np
# BUG FIX: the original `import matplotlib as plt` bound the top-level
# matplotlib package (which has no plotting functions) to the conventional
# pyplot alias; import the pyplot submodule instead.
import matplotlib.pyplot as plt
import openai
import gradio as gr
# Importing required components directly from gradio
from gradio import components
# OpenAI API key supplied via the environment (e.g. a Hugging Face Spaces
# secret). The original looked it up only under the literal name
# 'openai.api_key'; keep that lookup first for backward compatibility and
# fall back to the conventional OPENAI_API_KEY variable.
SECRET_TOKEN = os.getenv('openai.api_key') or os.getenv('OPENAI_API_KEY')

# Seed conversation history: a single system prompt establishing the persona.
messages = [{"role": "system", "content": "You are a doctor"}]
def send_message(message):
    """Send a full chat history to the OpenAI chat API and return the reply text.

    Args:
        message: list of {"role": ..., "content": ...} dicts forming the
            conversation so far (despite the singular name).

    Returns:
        The assistant's reply content as a plain string.
    """
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=message,
        api_key=SECRET_TOKEN,
    )
    # Pull the text of the first (and only) choice out of the response object.
    return completion["choices"][0]["message"]["content"]
def adaptive_truncate(message, token_limit):
    """Truncate a chat message in place to at most ``token_limit`` tokens.

    A "token" here is simply a whitespace-separated word. The original loop
    accumulated ``len(token.split())`` per token, which is always 1 for a
    word produced by ``str.split()`` — so the whole loop reduced to keeping
    the first ``token_limit`` words; this does that directly.

    Args:
        message: dict with a "content" string; mutated in place.
        token_limit: maximum number of words to keep (non-positive keeps none,
            matching the original's behavior).

    Returns:
        The same ``message`` dict, with possibly shortened "content".
    """
    tokens = message["content"].split()
    # max(..., 0) preserves the original semantics for non-positive limits,
    # where the loop broke before appending anything.
    message["content"] = " ".join(tokens[:max(token_limit, 0)])
    return message
def CustomChatGPT(enter_your_question):
    """Answer a health question via the OpenAI chat API, chunking very long
    questions so each request stays within a rough token budget.

    Args:
        enter_your_question: the user's free-text question.

    Returns:
        The model's reply to the final (or only) chunk of the question.

    BUG FIX: the original appended the user chunk to the history and then
    overwrote that same entry's content with the model reply — still under
    role "user" — so the user's text was lost and the reply was mislabeled
    on every subsequent request. The chunk and the reply are now recorded as
    two separate, correctly-labeled messages.
    """
    # Rough per-request budget (words), leaving headroom for the response.
    token_limit = 4000

    # Fresh conversation per call, starting with the system persona prompt.
    messages = [{"role": "system", "content": "You are a doctor"}]

    # Each whitespace-separated word counts as one "token" in this scheme
    # (the original's len(token.split()) was always 1).
    user_input_tokens = enter_your_question.split()
    current_message = {"role": "user", "content": ""}
    # Account for the system prompt already occupying part of the budget.
    current_token_count = len("You are a doctor".split())

    for token in user_input_tokens:
        if current_token_count + 1 <= token_limit:
            current_message["content"] += token + " "
            current_token_count += 1
        else:
            # Budget exceeded: flush the accumulated chunk, then start a new
            # chunk with the word that did not fit.
            current_message = adaptive_truncate(current_message, token_limit)
            reply = send_message(messages + [current_message])
            # Record both sides of the exchange with correct roles.
            messages.append({"role": "user", "content": current_message["content"].strip()})
            messages.append({"role": "assistant", "content": reply})
            current_message = {"role": "user", "content": token + " "}
            current_token_count = 1

    # Flush the final (possibly only) chunk and return its reply.
    current_message = adaptive_truncate(current_message, token_limit)
    reply = send_message(messages + [current_message])
    messages.append({"role": "user", "content": current_message["content"].strip()})
    messages.append({"role": "assistant", "content": reply})
    return reply
# Set up Gradio interface
# Single-textbox in, single-textbox out; answers are produced only on submit.
iface = gr.Interface(
    fn=CustomChatGPT,
    inputs=components.Textbox(lines=1, label="Enter your question"),
    outputs=components.Textbox(label="Doctor's advice"),
    title="Doctor's desk. Ask any help related to health?",
    # Clickable example questions shown below the input box.
    examples=[
        ["What are the symptoms of flu?"],
        ["How can I prevent a cold?"],
        ["Is it safe to take antibiotics for a viral infection?"],
    ],
    live=False,  # Removed 'live' mode so that action is taken only after submit button is clicked
    allow_flagging="never",
)
# Launch at import time — the Hugging Face Spaces convention for app.py.
# inline=False avoids embedding the UI in a notebook output cell.
iface.launch(inline=False)