# test_bot/api/Chat_Predict.py
import openai
from osbot_utils.utils.Dev import pprint

class Chat_Predict:

    def __init__(self):
        self.last_message  = None
        self.last_response = None
    def default_prompt(self):
        system_prompt = """
        You are an AI-powered "Head of Application Security" bot called Bobby Tables,
        designed to provide guidance and answer questions related to application security
        and information security.
        You have extensive knowledge and experience in securing applications, protecting
        data, implementing best practices, and addressing security concerns specific to
        application development and deployment.
        Furthermore, you possess extensive experience working with OWASP (Open Web
        Application Security Project) guidelines and recommendations, helping
        organizations mitigate common web application vulnerabilities and ensuring a
        robust security posture.
        Users will seek your assistance for advice, information, and solutions on a wide
        range of application security topics.
        Engage in a conversation with the bot by providing user messages and receiving
        model-generated responses.
        Don't mention that you are an AI-powered bot.
        Mention your OWASP experience in your first message, and mention a random OWASP
        Project that the viewer might be interested in.
        """
        return {"role": "system", "content": system_prompt}
    def predict(self, message, history):
        # print('--'*50)
        # print("Message:", message)
        # print("History:", history)
        # print('--' * 50)

        # history arrives as a list of [user_message, assistant_message] pairs;
        # convert it to the OpenAI chat format, starting with the system prompt
        history_openai_format = []
        history_openai_format.append(self.default_prompt())
        for human, assistant in history:
            history_openai_format.append({"role": "user", "content": human})
            history_openai_format.append({"role": "assistant", "content": assistant})
        history_openai_format.append({"role": "user", "content": message})
        #pprint(history_openai_format)
        response = openai.ChatCompletion.create(
            model       = 'gpt-3.5-turbo',
            messages    = history_openai_format,
            temperature = 1.0,
            stream      = True
        )
        # stream the reply back, yielding the accumulated text so the caller
        # (e.g. a Gradio chat UI) can render it incrementally
        partial_message = ""
        for chunk in response:
            delta = chunk['choices'][0]['delta']
            if len(delta) != 0:
                next_content    = delta.get('content', '')          # first chunk may only carry the role
                partial_message = partial_message + next_content
                yield partial_message

        self.last_message  = message
        self.last_response = partial_message
        #pprint(self.last_message, self.last_response)
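

# ---------------------------------------------------------------------------
# Usage sketch (illustration only): predict() is a generator that takes a user
# message plus a history of [user, assistant] pairs and yields the growing
# response, which matches the contract of Gradio's gr.ChatInterface. The block
# below is a minimal, hypothetical wiring, assuming gradio is installed and the
# OPENAI_API_KEY environment variable is set; it is not the project's actual
# Gradio harness.
# ---------------------------------------------------------------------------

if __name__ == '__main__':
    import os
    import gradio as gr                                     # assumed dependency for this sketch

    openai.api_key = os.environ.get('OPENAI_API_KEY')       # assumed way of providing the API key
    chat_predict   = Chat_Predict()

    # gr.ChatInterface streams each value yielded by the generator into the chat UI
    gr.ChatInterface(fn=chat_predict.predict).launch()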