Commit b258c4d
Parent: 8a5c82b

made app support multiple users at once

Files changed:
- README.md (+3 -1)
- app.py (+23 -17)
- gpt_mavplot.py (+0 -85)
- llm/gptPlotCreator.py (+15 -9)
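Background for the diffs below: the previous app.py created a single module-level `plot_creator = PlotCreator()` shared by every visitor, which is presumably what the commit message refers to. The change instead threads a `gr.State` value through every event handler so that each browser session gets its own `PlotCreator`. The following is a minimal sketch of that Gradio pattern, with made-up names (`Counter`, `bump`) rather than the app's own code:

```python
# Minimal sketch of the per-session state pattern adopted in app.py below.
# `Counter` and `bump` are illustrative names, not part of this repo.
import gradio as gr

class Counter:
    def __init__(self):
        self.n = 0

def bump(history, counter):
    # `counter` is this session's private object, not shared across users
    counter.n += 1
    history = history + [(f"clicked {counter.n} times", None)]
    return history, counter

with gr.Blocks() as demo:
    counter = gr.State(Counter())  # Gradio copies the default value per session
    chatbot = gr.Chatbot([])
    btn = gr.Button("bump")
    # the state component must appear in both the inputs and the outputs
    btn.click(bump, [chatbot, counter], [chatbot, counter])

if __name__ == "__main__":
    demo.queue()
    demo.launch()
```

The same shape appears in app.py: `plot_creator = gr.State(PlotCreator())` is added to the inputs and outputs of `add_text`, `add_file`, and `bot`, and each handler returns the state alongside the chat history.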
README.md
CHANGED
@@ -13,6 +13,8 @@ pinned: false
 
 MAVPlot is a Python-based project which uses Gradio as an interface and GPT-X powered by OpenAI as a chatbot to generate and plot MAVLink data. It provides an easy-to-use, chatbot-like interface for users to describe the plot they would like to generate.
 
+Demo is available at: https://huggingface.co/spaces/ericjohnson97/gpt_mavplot
+
 
 
 ## Architecture
@@ -55,7 +57,7 @@ Copy the `template.env` file to a file named `.env` in your root directory. Add
 After installing all dependencies, run the main script using:
 
 ```shell
-python
+python app.py
 ```
 
 A web-based Gradio interface will launch. You can upload a MAVLink tlog, then prompt the bot to generate plots from the log. The chatbot will process your request and generate the corresponding plot, which will be displayed in the chat interface. The script used to generate the plot will also be posted to the chat interface.
app.py
CHANGED
@@ -4,36 +4,38 @@ from llm.gptPlotCreator import PlotCreator
 
 plot_creator = PlotCreator()
 
-def add_text(history, text):
+def add_text(history, text, plot_creator):
     history = history + [(text, None)]
-    return history, ""
+    return history, plot_creator, ""
 
-def add_file(history, file):
+def add_file(history, file, plot_creator):
+    print(type(plot_creator))
     history = history + [((file.name,), None)]
-    return history
+    return history, plot_creator
 
 def format_history(history):
     return "\n".join([f"Human: {entry[0]}\nAI: {entry[1]}" for entry in history ])
 
-def bot(history):
+def bot(history, plot_creator):
     # Get the last input from the user
     user_input = history[-1][0] if history and history[-1][0] else None
 
     print(user_input)
+    print(type(plot_creator))
 
     # Check if it is a string
     if isinstance(user_input, str):
 
         history[-1][1] = "I am figuring out what data types are relevant for the plot...\n"
-        yield history
+        yield history, plot_creator
         data_types_str = plot_creator.find_relevant_data_types(user_input)
 
         history[-1][1] += "I am now generating a script to plot the data...\n"
-        yield history
+        yield history, plot_creator
         plot_creator.create_plot(user_input, data_types_str)
 
         history[-1][1] += "I am now running the script I just Generated...\n"
-        yield history
+        yield history, plot_creator
         response = plot_creator.run_script()
 
         history = history + [(None, f"Here is the code used to generate the plot:")]
@@ -41,8 +43,9 @@ def bot(history):
         history = history + response[0]
 
 
-        yield history
+        yield history, plot_creator
     else:
+        plot_creator = PlotCreator() # access the state variable through `.value`
         file_path = user_input[0]
         plot_creator.set_logfile_name(file_path)
 
@@ -51,17 +54,18 @@ def bot(history):
 
         history[-1][0] = f"user uploaded file: {filename}{extension}"
         history[-1][1] = "processing file..."
-        yield history
+        yield history, plot_creator
 
         data_types = plot_creator.parse_mavlink_log()
         history = history + [(None, f"I am done processing the file. Now you can ask me to generate a plot.")]
-        yield history
+        yield history, plot_creator
 
-    return history
+    return history, plot_creator
 
 
 with gr.Blocks() as demo:
     gr.Markdown("# GPT MAVPlot\n\nThis web-based tool allows users to upload mavlink tlogs in which the chat bot will use to generate plots from. It does this by creating a python script using pymavlink and matplotlib. The output includes the plot and the code used to generate it. ")
+    plot_creator = gr.State(PlotCreator())
     chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)
 
     with gr.Row():
@@ -72,12 +76,14 @@
             ).style(container=False)
         with gr.Column(scale=0.15, min_width=0):
             btn = gr.UploadButton("📁", file_types=["file"])
-
-    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
-        bot, chatbot, chatbot
+
+    var = "test"
+
+    txt.submit(add_text, [chatbot, txt, plot_creator], [chatbot, plot_creator, txt]).then(
+        bot, [chatbot, plot_creator], [chatbot, plot_creator]
     )
-    btn.upload(add_file, [chatbot, btn], [chatbot]).then(
-        bot, chatbot, chatbot
+    btn.upload(add_file, [chatbot, btn, plot_creator], [chatbot, plot_creator]).then(
+        bot, [chatbot, plot_creator], [chatbot, plot_creator]
     )
 
 if __name__ == "__main__":
gpt_mavplot.py
DELETED
@@ -1,85 +0,0 @@
-import gradio as gr
-import os
-from llm.gptPlotCreator import PlotCreator
-
-plot_creator = PlotCreator()
-
-def add_text(history, text):
-    history = history + [(text, None)]
-    return history, ""
-
-def add_file(history, file):
-    history = history + [((file.name,), None)]
-    return history
-
-def format_history(history):
-    return "\n".join([f"Human: {entry[0]}\nAI: {entry[1]}" for entry in history ])
-
-def bot(history):
-    # Get the last input from the user
-    user_input = history[-1][0] if history and history[-1][0] else None
-
-    print(user_input)
-
-    # Check if it is a string
-    if isinstance(user_input, str):
-
-        history[-1][1] = "I am figuring out what data types are relevant for the plot...\n"
-        yield history
-        data_types_str = plot_creator.find_relevant_data_types(user_input)
-
-        history[-1][1] += "I am now generating a script to plot the data...\n"
-        yield history
-        plot_creator.create_plot(user_input, data_types_str)
-
-        history[-1][1] += "I am now running the script I just Generated...\n"
-        yield history
-        response = plot_creator.run_script()
-
-        history = history + [(None, f"Here is the code used to generate the plot:")]
-        history = history + [(None, f"{response[1]}")]
-        history = history + response[0]
-
-
-        yield history
-    else:
-        file_path = user_input[0]
-        plot_creator.set_logfile_name(file_path)
-
-        # get only base name
-        filename, extension = os.path.splitext(os.path.basename(file_path))
-
-        history[-1][0] = f"user uploaded file: {filename}{extension}"
-        history[-1][1] = "processing file..."
-        yield history
-
-        data_types = plot_creator.parse_mavlink_log()
-        history = history + [(None, f"I am done processing the file. Now you can ask me to generate a plot.")]
-        yield history
-
-    return history
-
-
-with gr.Blocks() as demo:
-    gr.Markdown("# GPT MAVPlot\n\nThis web-based tool allows users to upload mavlink tlogs in which the chat bot will use to generate plots from. It does this by creating a python script using pymavlink and matplotlib. The output includes the plot and the code used to generate it. ")
-    chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)
-
-    with gr.Row():
-        with gr.Column(scale=0.85):
-            txt = gr.Textbox(
-                show_label=False,
-                placeholder="Enter text and press enter, or upload an image",
-            ).style(container=False)
-        with gr.Column(scale=0.15, min_width=0):
-            btn = gr.UploadButton("📁", file_types=["file"])
-
-    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
-        bot, chatbot, chatbot
-    )
-    btn.upload(add_file, [chatbot, btn], [chatbot]).then(
-        bot, chatbot, chatbot
-    )
-
-if __name__ == "__main__":
-    demo.queue()
-    demo.launch()
llm/gptPlotCreator.py
CHANGED
@@ -44,8 +44,8 @@ class PlotCreator:
 
         # define the input variables and template for the prompt to generate Python scripts
         mavlink_data_prompt = PromptTemplate(
-            input_variables=["data_types", "history", "human_input", "file"],
-            template="You are an AI conversation agent that will be used for generating python scripts to plot mavlink data provided by the user. Please create a python script using matplotlib and pymavlink's mavutil to plot the data provided by the user. Please do not explain the code just return the script. Please plot each independent variable over time in seconds. Please save the plot to file named
+            input_variables=["data_types", "history", "human_input", "file", "output_file"],
+            template="You are an AI conversation agent that will be used for generating python scripts to plot mavlink data provided by the user. Please create a python script using matplotlib and pymavlink's mavutil to plot the data provided by the user. Please do not explain the code just return the script. Please plot each independent variable over time in seconds. Please save the plot to file named {output_file} with at least 400 dpi. please use blocking=false in your call to recv_match and be sure to break the loop if a msg in None. here are the relevant data types in the log:\n\n{data_types} \n\nChat History:\n{history} \n\nHUMAN: {human_input} \n\nplease read this data from the file {file}.",
         )
 
         # create an instance of LLMChain with the defined prompt and verbosity
@@ -122,7 +122,7 @@
 
         # run the fixed script
         try:
-            subprocess.check_output(["python",
+            subprocess.check_output(["python", self.script_path], stderr=subprocess.STDOUT)
         except:
             code[0] = "Sorry I was unable to fix the script.\nThis is my attempt to fix it:\n\n" + code[0]
         return code
@@ -134,7 +134,13 @@
         :param filename: The name of the log file.
         :type filename: str
         """
+        # extract the path to the log file
+
+        path = os.path.dirname(filename)
         self.logfile_name = filename
+        self.script_path = os.path.join(path, "plot.py")
+        self.plot_path = os.path.join(path, "plot.png")
+
 
     def find_relevant_data_types(self, human_input):
         # Search the database for documents that are similar to the human input
@@ -150,20 +156,20 @@
     def run_script(self):
         # Run the script and if it doesn't work, capture the output and call attempt_to_fix_script
        try:
-            subprocess.check_output(["python",
+            subprocess.check_output(["python", self.script_path], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            print(e.output.decode())
-            code = self.attempt_to_fix_sctript(
+            code = self.attempt_to_fix_sctript(self.script_path, e.output.decode())
            self.last_code = code[0]
 
        except Exception as e:
            print(e)
-            code = self.attempt_to_fix_sctript(
+            code = self.attempt_to_fix_sctript(self.script_path, str(e))
            self.last_code = code[0]
 
 
        # Return a list containing the filename of the plot and the code used to generate it
-        return [[(None, (
+        return [[(None, (self.plot_path,))], self.last_code]
 
    def create_plot(self, human_input, data_type_info_text):
        """
@@ -181,14 +187,14 @@
 
 
        # Generate a response by running the chain with the relevant data types, history, file name and human input
-        response = self.chain.run({"data_types" : data_type_info_text, "history" : history, "file": self.logfile_name, "human_input": human_input})
+        response = self.chain.run({"data_types" : data_type_info_text, "history" : history, "file": self.logfile_name, "human_input": human_input, "output_file": self.plot_path})
        print(response)
 
        # Parse the code from the response
        code = self.extract_code_snippets(response)
 
        # Write the code to a file named "plot.py"
-        self.write_plot_script(
+        self.write_plot_script(self.script_path, code[0])
 
        # Store the code for the next iteration
        self.last_code = code[0]
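In llm/gptPlotCreator.py the generated script and plot are no longer written to fixed names in the working directory: `set_logfile_name` now derives `script_path` and `plot_path` from the directory of the uploaded log, so sessions working on different uploads write to different files. A small hypothetical check of that behaviour follows; the log paths are invented, and constructing `PlotCreator` presumably still requires the OpenAI key from `.env`, since `__init__` builds the LangChain chain:

```python
# Hypothetical sanity check that per-upload output paths no longer collide.
# The /tmp paths below are invented; a real run needs a configured .env.
from llm.gptPlotCreator import PlotCreator

a, b = PlotCreator(), PlotCreator()
a.set_logfile_name("/tmp/gradio/upload_a/flight1.tlog")
b.set_logfile_name("/tmp/gradio/upload_b/flight2.tlog")

# each instance writes plot.py / plot.png next to its own log file
assert a.script_path == "/tmp/gradio/upload_a/plot.py"
assert b.plot_path == "/tmp/gradio/upload_b/plot.png"
assert a.plot_path != b.plot_path
```

Writing next to the uploaded log means each upload directory gets its own plot.py and plot.png, rather than one shared pair in the process working directory as the removed (truncated) lines suggest.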