added ChatGPT integration
- app.py                      +1  -1
- requirements.txt            +5  -1
- test_bot/Gradio_Test.py     +56 -1
- test_bot/api/Open_API.py    +43 -0
- tests/api/test_Open_API.py  +28 -0
app.py
CHANGED
@@ -15,4 +15,4 @@ gradio_test = Gradio_Test()
 demo = gradio_test.create_demo()
 
 if __name__ == "__main__":
-    demo.launch(
+    demo.launch()
requirements.txt
CHANGED
@@ -1 +1,5 @@
-
+git+https://github.com/owasp-sbot/OSBot-Utils.git
+gradio
+openai
+python-dotenv
+# langchain
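The new dependencies are used together below: python-dotenv loads the OpenAI key from a local .env file instead of hard-coding it. A minimal sketch of the setup these requirements imply (assuming a .env file at the repository root that defines OPEN_API_KEY, the variable name used by Open_API further down):

    # Sketch only: mirrors what Open_API.setup() does below.
    # Assumes .env contains a line such as: OPEN_API_KEY=sk-...
    from os import getenv

    import openai
    from dotenv import load_dotenv

    load_dotenv()                             # read .env into the process environment
    openai.api_key = getenv('OPEN_API_KEY')   # same variable name as Open_API uses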
test_bot/Gradio_Test.py
CHANGED
@@ -1,14 +1,69 @@
 import gradio as gr
+import openai
+from osbot_utils.utils.Dev import pprint
+
+from test_bot.api.Open_API import Open_API
+
+Open_API().setup()
+
+
 
 class Gradio_Test:
 
+
     def __init__(self):
         pass
 
     def title(self):
-        return "#
+        return "# Chat GPT Powered demo"
+
+    def default_prompt(self):
+        system_prompt = """You are an AI-powered CISO (Chief Information Security Officer) bot, designed to provide guidance and answer questions related to cybersecurity and information security. You have extensive knowledge in securing systems, protecting data, implementing best practices, and addressing security concerns. Users will seek your assistance for advice, information, and solutions on a wide range of security topics. Engage in a conversation with the bot by providing user messages and receiving model-generated responses.
+
+User: Hi, I have some security concerns and questions. Can you help me?
+
+CISO Bot:"""
+        return {"role": "system", "content": system_prompt}
+
+    def predict(self, message, history):
+        history_openai_format = []
+        history_openai_format.append(self.default_prompt())
+        for human, assistant in history:
+            history_openai_format.append({"role": "user", "content": human})
+            history_openai_format.append({"role": "assistant", "content": assistant})
+        history_openai_format.append({"role": "user", "content": message})
+
+        pprint(history_openai_format)
+        response = openai.ChatCompletion.create(
+            model='gpt-3.5-turbo',
+            messages=history_openai_format,
+            temperature=1.0,
+            stream=True
+        )
+
+        partial_message = ""
+        for chunk in response:
+            if len(chunk['choices'][0]['delta']) != 0:
+                partial_message = partial_message + chunk['choices'][0]['delta']['content']
+                yield partial_message
 
     def create_demo(self):
+        # def predict(message, history):
+        #     open_api = Open_API().setup()
+        #     return open_api.create()
+
+        # return gr.ChatInterface(self.predict).queue()
+
+        with gr.Blocks() as demo:
+            gr.Markdown(self.title())
+            textbox_input = gr.Textbox(value='Hello, good morning', render=False)
+            gr.ChatInterface(self.predict, textbox=textbox_input)
+
+        demo.queue()
+        return demo
+        # return
+
+    def create_demo__2(self):
         title = self.title()
 
         with gr.Blocks() as demo:
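Taken together, create_demo() builds a gr.Blocks page whose gr.ChatInterface is bound to predict(), a generator that yields the growing reply so the UI can render it incrementally; demo.queue() is what lets those yields stream to the browser. A minimal usage sketch, matching what app.py does (assumes OPEN_API_KEY is present in .env):

    # Sketch only: same flow as app.py.
    from test_bot.Gradio_Test import Gradio_Test   # importing this module also runs Open_API().setup()

    demo = Gradio_Test().create_demo()   # Blocks page containing the ChatInterface
    demo.launch()                        # serves the chat UI on Gradio's default local port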
test_bot/api/Open_API.py
ADDED
@@ -0,0 +1,43 @@
+from os import getenv
+
+import openai
+from dotenv import load_dotenv
+from openai import ChatCompletion
+from osbot_utils.decorators.methods.cache_on_self import cache_on_self
+
+OPEN_API_KEY = 'OPEN_API_KEY'
+
+class Open_API:
+
+    def __init__(self):
+        pass
+
+    @cache_on_self
+    def api_key(self):
+        load_dotenv()
+        return getenv(OPEN_API_KEY)
+
+    def create(self):
+        history_openai_format = self.messages()
+        response = ChatCompletion.create(
+            model='gpt-3.5-turbo',
+            messages=history_openai_format,
+            temperature=1.0,
+            stream=True
+        )
+
+        return self.parse_response(response)
+
+    def messages(self):
+        return [{"role": "user", "content": 'Hi'}]
+
+    def parse_response(self, response):
+        partial_message = ""
+        for chunk in response:
+            if len(chunk['choices'][0]['delta']) != 0:
+                partial_message = partial_message + chunk['choices'][0]['delta']['content']
+                yield partial_message
+
+    def setup(self):
+        openai.api_key = self.api_key()
+        return self
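Open_API keeps the OpenAI plumbing out of the UI code: setup() loads the key and assigns it to openai.api_key, create() issues a single streamed gpt-3.5-turbo request, and parse_response() turns the chunk stream into a generator of progressively longer partial messages. A minimal consumption sketch, mirroring test_create below:

    # Sketch only: mirrors tests/api/test_Open_API.py (needs a valid OPEN_API_KEY in .env).
    from test_bot.api.Open_API import Open_API

    open_api = Open_API().setup()         # loads the key via load_dotenv() and sets openai.api_key
    for partial in open_api.create():     # each item is the reply accumulated so far
        print(partial)

Because api_key() is decorated with @cache_on_self, the .env lookup happens only once per Open_API instance.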
tests/api/test_Open_API.py
ADDED
@@ -0,0 +1,28 @@
+from os import getenv
+from unittest import TestCase
+
+import openai
+from osbot_utils.utils.Dev import pprint
+
+from test_bot.api.Open_API import Open_API, OPEN_API_KEY
+
+
+class test_Open_API(TestCase):
+
+    def setUp(self) -> None:
+        self.open_api = Open_API().setup()
+
+    def test___init__(self):
+        assert type(self.open_api) == Open_API
+
+    def test_api_key(self):
+        api_key = self.open_api.api_key()
+        assert api_key is not None
+        assert api_key == getenv(OPEN_API_KEY)
+
+    def test_create(self):
+        response = self.open_api.create()
+        pprint(list(response))
+
+    def test_setup(self):
+        assert openai.api_key == self.open_api.api_key()
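Note that test_create makes a live, streamed request to the OpenAI API, so these tests need network access and a valid OPEN_API_KEY in .env; with the standard library runner they can be executed from the repository root with, for example, python -m unittest discover tests (assuming the test directories are importable).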