refactored code into Gradio_Test
- app.py +2 -1
- test_bot/Gradio_Test.py +40 -34
- test_bot/api/Chat_Predict.py +70 -0
app.py CHANGED
@@ -15,4 +15,5 @@ gradio_test = Gradio_Test()
 demo = gradio_test.create_demo()
 
 if __name__ == "__main__":
-    demo.launch()
+    #demo.launch()
+    demo.launch(auth=("admin", "pass1234"))
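For reference, `auth` is Gradio's built-in login gate: `launch()` accepts a single `(username, password)` tuple, as committed here, a list of such tuples, or a callable that validates credentials. A minimal sketch of the callable form (the `check_login` helper and its hard-coded check are illustrative, not part of this commit):

import gradio as gr

def check_login(username, password):
    # Stand-in check: swap in a real credential store before use.
    return username == "admin" and password == "pass1234"

with gr.Blocks() as demo:
    gr.Markdown("# protected demo")

# demo.launch(auth=("admin", "pass1234"))  # single hard-coded pair, as in app.py
demo.launch(auth=check_login)              # or validate credentials programmatically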
test_bot/Gradio_Test.py CHANGED
@@ -1,7 +1,9 @@
 import gradio as gr
 import openai
 from osbot_utils.utils.Dev import pprint
+from osbot_utils.utils.Misc import list_set
 
+from test_bot.api.Chat_Predict import Chat_Predict
 from test_bot.api.Open_API import Open_API
 
 Open_API().setup()
@@ -15,37 +17,40 @@ class Gradio_Test:
         pass
 
     def title(self):
-        return "#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return "# Meet Bobby Tables (head of Application Security)"
+
+
+
+    # def predict(self, message, history):
+    #     print('--'*50)
+    #     print("Message:", message)
+    #     print("History:", history)
+    #     print('--' * 50)
+    #     history_openai_format = []
+    #     history_openai_format.append(self.default_prompt())
+    #     for human, assistant in history:
+    #         history_openai_format.append({"role": "user", "content": human})
+    #         history_openai_format.append({"role": "assistant", "content": assistant})
+    #     history_openai_format.append({"role": "user", "content": message})
+    #
+    #     #pprint(history_openai_format)
+    #     response = openai.ChatCompletion.create(
+    #         model='gpt-3.5-turbo',
+    #         messages=history_openai_format,
+    #         temperature=1.0,
+    #         stream=True
+    #     )
+    #
+    #     #token_count = list_set(response)
+    #     #print("Number of tokens used:", token_count)
+    #
+    #     partial_message = ""
+    #     for chunk in response:
+    #         if len(chunk['choices'][0]['delta']) != 0:
+    #             next_content = chunk['choices'][0]['delta']['content']
+    #             partial_message = partial_message + next_content
+    #             yield next_content
+    #     yield partial_message
 
     def create_demo(self):
         # def predict(message, history):
@@ -53,11 +58,12 @@ class Gradio_Test:
         # return open_api.create()
 
         #return gr.ChatInterface(self.predict).queue()
-
+        default_text = "Hi, good morning"
+        chat_predict = Chat_Predict()
         with gr.Blocks() as demo:
             gr.Markdown(self.title())
-            textbox_input = gr.Textbox(value=
-            gr.ChatInterface(
+            textbox_input = gr.Textbox(value=default_text , render=False)
+            gr.ChatInterface(chat_predict.predict, textbox=textbox_input)
 
         demo.queue()
         return demo
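Two details make this wiring work: `render=False` keeps the Textbox out of the Blocks layout at creation so it can be handed to `gr.ChatInterface` as its input box (pre-filled with the default text), and `demo.queue()` enables the streaming that the generator-based predict relies on. A runnable sketch of the same pattern, with a hypothetical echo function standing in for `Chat_Predict.predict`:

import gradio as gr

def predict(message, history):
    # Stand-in for Chat_Predict.predict: stream the reply back
    # one character at a time, yielding the growing partial text.
    partial = ""
    for char in f"echo: {message}":
        partial += char
        yield partial

with gr.Blocks() as demo:
    gr.Markdown("# Meet Bobby Tables (head of Application Security)")
    textbox_input = gr.Textbox(value="Hi, good morning", render=False)
    gr.ChatInterface(predict, textbox=textbox_input)

demo.queue()   # required for generator handlers to stream

if __name__ == "__main__":
    demo.launch()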
test_bot/api/Chat_Predict.py ADDED
@@ -0,0 +1,70 @@
+import openai
+from osbot_utils.utils.Dev import pprint
+
+
+class Chat_Predict:
+
+    def __init__(self):
+        self.last_message = None
+        self.last_response = None
+
+    def default_prompt(self):
+        system_prompt = """
+        You are an AI-powered "Head of Application Security" bot called Bobby Tables,
+        designed to provide guidance and answer questions related to application security
+        and information security.
+
+        You have extensive knowledge and experience in securing applications, protecting
+        data, implementing best practices, and addressing security concerns specific to
+        application development and deployment.
+
+        Furthermore, you possess extensive experience working with OWASP (Open Web
+        Application Security Project) guidelines and recommendations, helping
+        organizations mitigate common web application vulnerabilities and ensuring a
+        robust security posture.
+
+        Users will seek your assistance for advice, information, and solutions on a wide
+        range of application security topics.
+
+        Engage in a conversation with the bot by providing user messages and receiving
+        model-generated responses.
+
+        Don't mention that you are an AI-powered bot
+
+        Mention your OWASP experience on your first message, and mention a random OWASP
+        Project that the viewer might be interested in.
+        """
+        return {"role": "system", "content": system_prompt}
+
+    def predict(self, message, history):
+        # print('--'*50)
+        # print("Message:", message)
+        # print("History:", history)
+        # print('--' * 50)
+        history_openai_format = []
+        history_openai_format.append(self.default_prompt())
+        for human, assistant in history:
+            history_openai_format.append({"role": "user", "content": human})
+            history_openai_format.append({"role": "assistant", "content": assistant})
+        history_openai_format.append({"role": "user", "content": message})
+
+        #pprint(history_openai_format)
+        response = openai.ChatCompletion.create(
+            model='gpt-3.5-turbo',
+            messages=history_openai_format,
+            temperature=1.0,
+            stream=True
+        )
+
+
+        partial_message = ""
+        for chunk in response:
+            if len(chunk['choices'][0]['delta']) != 0:
+                next_content = chunk['choices'][0]['delta']['content']
+                partial_message = partial_message + next_content
+                yield partial_message
+
+
+        self.last_message = message
+        self.last_response = partial_message
+        #pprint(self.last_message, self.last_response)
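Note that `predict` targets the pre-1.0 `openai` package, where `ChatCompletion.create(stream=True)` returns an iterable of chunk dicts; each yield hands the caller the reply accumulated so far, and `last_message`/`last_response` are only set once the stream is exhausted. A hypothetical driver, assuming the API key is already configured (in the app itself, `Open_API().setup()` takes care of that):

from test_bot.api.Chat_Predict import Chat_Predict

chat_predict = Chat_Predict()
history = []      # prior turns as (user, assistant) pairs

# Each iteration yields a longer prefix of the reply as chunks arrive.
for partial in chat_predict.predict("Hi, good morning", history):
    print(partial)

# Set after the generator finishes, so they reflect the full exchange.
print(chat_predict.last_message)
print(chat_predict.last_response)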