Update app.py
app.py
CHANGED
@@ -1,330 +1,354 @@
 import os
 import subprocess
-import
-from
-import
-… (old lines 6–44 not rendered in the diff view)
-"""
-… (old lines 46–52 not rendered)
-""
-… (old lines 54–57 not rendered)
-{
-… (old lines 59–67 not rendered)
-action:
-"""
-
-TASK_PROMPT = """
-You are a helpful AI assistant. Your current history is:
-{history}
-What is the next task?
-task:
-"""
-
-UNDERSTAND_TEST_RESULTS_PROMPT = """
-You are a helpful AI assistant. The test results are:
-{test_results}
-What do you want to know about the test results?
-thought:
-"""
-
-def format_prompt(message, history, max_history_turns=5):
-    prompt = "<s>"
-    # Keep only the last 'max_history_turns' turns
-    for user_prompt, bot_response in history[-max_history_turns:]:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-def run_gpt(
-    prompt_template,
-    stop_tokens,
-    max_tokens,
-    purpose,
-    **prompt_kwargs,
-):
-    seed = random.randint(1,1111111111111111)
-    logging.info(f"Seed: {seed}")  # Log the seed
-
-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        logging.info(LOG_PROMPT.format(content))  # Log the prompt
-
-    resp = client.text_generation(content, max_new_tokens=max_tokens, stop_sequences=stop_tokens, temperature=0.7, top_p=0.8, repetition_penalty=1.5)
-    if VERBOSE:
-        logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response
-    return resp
-
-def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.7, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.5, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
-):
-    seed = random.randint(1,1111111111111111)
-
-    if agent_name == "WEB_DEV":
-        agent = "You are a helpful AI assistant. You are a web developer."
-    elif agent_name == "AI_SYSTEM_PROMPT":
-        agent = "You are a helpful AI assistant. You are an AI system."
-    elif agent_name == "PYTHON_CODE_DEV":
-        agent = "You are a helpful AI assistant. You are a Python code developer."
     else:
-… (old lines 129–187 not rendered)
-)
-… (old lines 189–203 not rendered)
-            return action_name, action_input, history, task
-        else:
-            history += "{}\n".format(line)
-            logging.info(f"Other Output: {line}")
-    return "MAIN", None, history, task
-
-def call_set_task(purpose, task, history, directory, action_input):
-    logging.info(f"CALLING SET_TASK: {action_input}")
-    task = run_gpt(
-        TASK_PROMPT,
-        stop_tokens=[],
-        max_tokens=64,
-        purpose=purpose,
-        task=task,
-        history=history,
-    ).strip("\n")
-    history += "observation: task has been updated to: {}\n".format(task)
-    return "MAIN", None, history, task
-
-def end_fn(purpose, task, history, directory, action_input):
-    logging.info(f"CALLING END_FN: {action_input}")
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task
-
-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
-}
-
-def run_action(purpose, task, history, directory, action_name, action_input):
-    logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
-    try:
-        if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name = "COMPLETE"
-            task = "END"
-            return action_name, "COMPLETE", history, task
-
-        # compress the history when it is long
-        if len(history.split("\n")) > MAX_HISTORY:
-            logging.info("COMPRESSING HISTORY")
-            history = compress_history(purpose, task, history, directory)
-        if not action_name in NAME_TO_FUNC:
-            action_name = "MAIN"
-        if action_name == "" or action_name == None:
-            action_name = "MAIN"
-        assert action_name in NAME_TO_FUNC
-
-        logging.info(f"RUN: {action_name} - {action_input}")
-        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-    except Exception as e:
-        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
-        logging.error(f"Error in run_action: {e}")
-        return "MAIN", None, history, task
-
-def run(purpose, history):
-    task = None
-    directory = "./"
-    if history:
-        history = str(history).strip("[]")
-    if not history:
-        history = ""
-
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        logging.info(f"---")
-        logging.info(f"Purpose: {purpose}")
-        logging.info(f"Task: {task}")
-        logging.info(f"---")
-        logging.info(f"History: {history}")
-        logging.info(f"---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-        yield (history)
-        if task == "END":
-            return (history)
-
-def generate_text_chunked(input_text, model, generation_parameters, max_tokens_to_generate):
-    """Generates text in chunks to avoid token limit errors."""
-    sentences = nltk.sent_tokenize(input_text)
-    generated_text = []
-    generator = pipeline('text-generation', model=model)
-
-    for sentence in sentences:
-        # Tokenize the sentence and check if it's within the limit
-        tokens = generator.tokenizer(sentence).input_ids
-        if len(tokens) + max_tokens_to_generate <= 32768:
-            # Generate text for this chunk
-            response = generator(sentence, max_length=max_tokens_to_generate, **generation_parameters)
-            generated_text.append(response[0]['generated_text'])
-        else:
-            # Handle cases where the sentence is too long
-            print(f"Sentence too long: {sentence}")
-
-    return ''.join(generated_text)
-
-# Gradio Interface
-def gradio_interface(purpose, history):
-    try:
-        history = json.loads(history) if history else []
-    except json.JSONDecodeError:
-        history = []
-    result = run(purpose, history)
-    return next(result)
-
-iface = gr.Interface(
-    fn=gradio_interface,
-    inputs=[
-        gr.Textbox(lines=2, placeholder="Enter the purpose here..."),
-        gr.Textbox(lines=10, placeholder="Enter the history here (JSON format)...")
-    ],
-    outputs="text",
-    title="AI Assistant",
-    description="An AI assistant that helps with various tasks."
-)
 
 if __name__ == "__main__":
-… (old line 330 not rendered)
+import streamlit as st
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+import openai
+
+# Constants
+HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
+PROJECT_ROOT = "projects"
+AGENT_DIRECTORY = "agents"
+
+# Initialize session state
+if 'chat_history' not in st.session_state:
+    st.session_state.chat_history = []
+if 'terminal_history' not in st.session_state:
+    st.session_state.terminal_history = []
+if 'workspace_projects' not in st.session_state:
+    st.session_state.workspace_projects = {}
+if 'available_agents' not in st.session_state:
+    st.session_state.available_agents = []
+if 'current_state' not in st.session_state:
+    st.session_state.current_state = {
+        'toolbox': {},
+        'workspace_chat': {}
+    }
26 |
+
|
27 |
+
# AI Agent class
|
28 |
+
class AIAgent:
|
29 |
+
def __init__(self, name, description, skills):
|
30 |
+
self.name = name
|
31 |
+
self.description = description
|
32 |
+
self.skills = skills
|
33 |
+
|
34 |
+
def create_agent_prompt(self):
|
35 |
+
skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
|
36 |
+
agent_prompt = f"""
|
37 |
+
As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
|
38 |
+
{skills_str}
|
39 |
+
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
|
40 |
+
"""
|
41 |
+
return agent_prompt
|
42 |
+
|
43 |
+
def autonomous_build(self, chat_history, workspace_projects):
|
44 |
+
summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
|
45 |
+
summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
|
46 |
+
next_step = "Based on the current state, the next logical step is to implement the main application logic."
|
47 |
+
return summary, next_step
|
48 |
+
|
49 |
+
# Functions for agent management
|
50 |
+
def save_agent_to_file(agent):
|
51 |
+
if not os.path.exists(AGENT_DIRECTORY):
|
52 |
+
os.makedirs(AGENT_DIRECTORY)
|
53 |
+
file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
|
54 |
+
config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
|
55 |
+
with open(file_path, "w") as file:
|
56 |
+
file.write(agent.create_agent_prompt())
|
57 |
+
with open(config_path, "w") as file:
|
58 |
+
file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
|
59 |
+
st.session_state.available_agents.append(agent.name)
|
60 |
+
commit_and_push_changes(f"Add agent {agent.name}")
|
61 |
+
|
62 |
+
def load_agent_prompt(agent_name):
|
63 |
+
file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
|
64 |
+
if os.path.exists(file_path):
|
65 |
+
with open(file_path, "r") as file:
|
66 |
+
agent_prompt = file.read()
|
67 |
+
return agent_prompt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
else:
|
69 |
+
return None
|
70 |
+
|
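
Note: a minimal sketch of how the AIAgent class above behaves on its own; the agent name, description, and skills are made-up examples, not part of the commit:

# Hypothetical usage of AIAgent, runnable without Streamlit
agent = AIAgent("DemoDev", "Demo agent", ["Python", "Streamlit", "Git"])
print(agent.create_agent_prompt())  # persona text with one "* skill" bullet per skill
summary, next_step = agent.autonomous_build(
    chat_history=[("build a todo app", "sure, let's scaffold it")],
    workspace_projects={"todo": {"files": ["app.py"]}},
)
print(summary)
print(next_step)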
+def create_agent_from_text(name, text):
+    skills = text.split('\n')
+    agent = AIAgent(name, "AI agent created from text input.", skills)
+    save_agent_to_file(agent)
+    return agent.create_agent_prompt()
+
+# OpenAI API setup for text generation (the chat calls below use the "gpt-4" model)
+openai.api_key = st.secrets["OPENAI_API_KEY"]
+
+# Initialize the Hugging Face model and tokenizer
+model_name = "gpt2"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
+
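
Note: the generator pipeline defined above is never called later in this file (the chat path uses model.generate directly); a sketch of invoking it, with an illustrative prompt:

# Hypothetical direct call of the text-generation pipeline
out = generator("def add(a, b):", max_new_tokens=20, num_return_sequences=1)
print(out[0]['generated_text'])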
+# Tool Box UI elements
+def toolbox():
+    st.header("Tool Box")
+
+    # List available agents (stored by name; prompts and descriptions live under AGENT_DIRECTORY)
+    for agent_name in st.session_state.available_agents:
+        st.markdown(f"### {agent_name}")
+        if st.button(f'Chat with {agent_name}'):
+            chat_with_agent(agent_name)
+
+    # Add new agents
+    if st.session_state.current_state['toolbox'].get('new_agent') is None:
+        st.session_state.current_state['toolbox']['new_agent'] = {}
+
+    st.text_input("Agent Name", key='name', on_change=update_agent)
+    st.text_area("Agent Description", key='description', on_change=update_agent)
+    st.text_input("Skills (comma-separated)", key='skills', on_change=update_agent)
+
+    if st.button('Create New Agent'):
+        skills = [s.strip() for s in st.session_state.current_state['toolbox']['new_agent'].get('skills', '').split(',')]
+        new_agent = AIAgent(st.session_state.current_state['toolbox']['new_agent'].get('name'),
+                            st.session_state.current_state['toolbox']['new_agent'].get('description'), skills)
+        st.session_state.available_agents.append(new_agent.name)
+
+def update_agent():
+    st.session_state.current_state['toolbox']['new_agent'] = {
+        'name': st.session_state.name,
+        'description': st.session_state.description,
+        'skills': st.session_state.skills
+    }
+
+def chat_with_agent(agent_name):
+    st.subheader(f"Chat with {agent_name}")
+    chat_input = st.text_area("Enter your message:")
+    if st.button("Send"):
+        chat_response = chat_interface_with_agent(chat_input, agent_name)
+        st.session_state.chat_history.append((chat_input, chat_response))
+        st.write(f"{agent_name}: {chat_response}")
+
+# Workspace UI elements
+def workspace():
+    st.header("Workspace")
+
+    # Project selection and interaction
+    for project, details in st.session_state.workspace_projects.items():
+        st.write(f"Project: {project}")
+        for file in details['files']:
+            st.write(f"  - {file}")
+
+    if st.button('Add New Project'):
+        new_project = {'name': '', 'description': '', 'files': []}
+        st.session_state.workspace_projects[new_project['name']] = new_project
+
+# Main function to display the app
+def main():
+    toolbox()
+    workspace()
 
 if __name__ == "__main__":
+    main()
+
+# Additional functionalities
+def commit_and_push_changes(commit_message):
+    commands = [
+        "git add .",
+        f"git commit -m '{commit_message}'",
+        "git push"
+    ]
+    for command in commands:
+        result = subprocess.run(command, shell=True, capture_output=True, text=True)
+        if result.returncode != 0:
+            st.error(f"Error executing command '{command}': {result.stderr}")
+            break
+
+def chat_interface_with_agent(input_text, agent_name):
+    agent_prompt = load_agent_prompt(agent_name)
+    if agent_prompt is None:
+        return f"Agent {agent_name} not found."
+
+    combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
+    max_input_length = 900
+    input_ids = tokenizer.encode(combined_input, return_tensors="pt")
+    if input_ids.shape[1] > max_input_length:
+        input_ids = input_ids[:, :max_input_length]
+
+    outputs = model.generate(input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id)
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response
+
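
Note: a stand-alone sketch of the truncate-then-generate flow inside chat_interface_with_agent; gpt2 runs on CPU, and the prompt text is illustrative:

# Hypothetical stand-alone run of the generation path
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
lm = AutoModelForCausalLM.from_pretrained("gpt2")
ids = tok.encode("You are DemoDev, a helpful developer.\n\nUser: hello\nAgent:", return_tensors="pt")
ids = ids[:, :900]  # same hard cap as max_input_length above
out = lm.generate(ids, max_new_tokens=50, do_sample=True, pad_token_id=tok.eos_token_id)
print(tok.decode(out[0], skip_special_tokens=True))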
+def workspace_interface(project_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if not os.path.exists(PROJECT_ROOT):
+        os.makedirs(PROJECT_ROOT)
+    if not os.path.exists(project_path):
+        os.makedirs(project_path)
+        st.session_state.workspace_projects[project_name] = {"files": []}
+        st.session_state.current_state['workspace_chat']['project_name'] = project_name
+        commit_and_push_changes(f"Create project {project_name}")
+        return f"Project {project_name} created successfully."
+    else:
+        return f"Project {project_name} already exists."
+
+def add_code_to_workspace(project_name, code, file_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if os.path.exists(project_path):
+        file_path = os.path.join(project_path, file_name)
+        with open(file_path, "w") as file:
+            file.write(code)
+        st.session_state.workspace_projects[project_name]["files"].append(file_name)
+        st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
+        commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
+        return f"Code added to {file_name} in project {project_name} successfully."
+    else:
+        return f"Project {project_name} does not exist."
+
+def terminal_interface(command, project_name=None):
+    if project_name:
+        project_path = os.path.join(PROJECT_ROOT, project_name)
+        if not os.path.exists(project_path):
+            return f"Project {project_name} does not exist."
+        result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
+    else:
+        result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    if result.returncode == 0:
+        st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
+        return result.stdout
+    else:
+        st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
+        return result.stderr
+
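
Note: the three workspace helpers above reduce to a create/write/run cycle on a plain directory; a sketch without Streamlit state or the git push (paths are illustrative):

# Hypothetical stand-alone version of the workspace cycle
import os, subprocess

demo = os.path.join("projects", "demo")
os.makedirs(demo, exist_ok=True)                      # workspace_interface
with open(os.path.join(demo, "hello.py"), "w") as f:  # add_code_to_workspace
    f.write("print('hi')\n")
result = subprocess.run("python hello.py", cwd=demo, shell=True,
                        capture_output=True, text=True)  # terminal_interface
print(result.stdout or result.stderr)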
+def summarize_text(text):
+    summarizer = pipeline("summarization")
+    summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
+    st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
+    return summary[0]['summary_text']
+
+def sentiment_analysis(text):
+    analyzer = pipeline("sentiment-analysis")
+    sentiment = analyzer(text)
+    st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
+    return sentiment[0]
+
+def generate_code(code_idea):
+    response = openai.ChatCompletion.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": "You are an expert software developer."},
+            {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
+        ]
+    )
+    generated_code = response.choices[0].message['content'].strip()
+    st.session_state.current_state['toolbox']['generated_code'] = generated_code
+    return generated_code
+
+def translate_code(code, input_language, output_language):
+    language_extensions = {
+        "Python": ".py",
+        "JavaScript": ".js",
+        # Add more languages and their extensions here
+    }
+    if input_language not in language_extensions:
+        raise ValueError(f"Invalid input language: {input_language}")
+    if output_language not in language_extensions:
+        raise ValueError(f"Invalid output language: {output_language}")
+
+    prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
+    response = openai.ChatCompletion.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": "You are an expert software developer."},
+            {"role": "user", "content": prompt}
+        ]
+    )
+    translated_code = response.choices[0].message['content'].strip()
+    st.session_state.current_state['toolbox']['translated_code'] = translated_code
+    return translated_code
+
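
Note: generate_code and translate_code both go through the legacy openai.ChatCompletion API (pre-1.0 SDK) and also write to st.session_state, so they are meant to run inside the app with a key in st.secrets; an illustrative call:

# Hypothetical call (assumes a valid OPENAI_API_KEY and the pre-1.0 openai SDK)
try:
    print(translate_code("print('hi')", "Python", "JavaScript"))
except Exception as err:  # e.g. missing key, quota, or SDK-version errors
    print(f"OpenAI call failed: {err}")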
+# Streamlit App
+st.title("AI Agent Creator")
+
+# Sidebar navigation
+st.sidebar.title("Navigation")
+app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+if app_mode == "AI Agent Creator":
+    # AI Agent Creator
+    st.header("Create an AI Agent from Text")
+
+    st.subheader("From Text")
+    agent_name = st.text_input("Enter agent name:")
+    text_input = st.text_area("Enter skills (one per line):")
+    if st.button("Create Agent"):
+        agent_prompt = create_agent_from_text(agent_name, text_input)
+        st.success(f"Agent '{agent_name}' created and saved successfully.")
+        st.session_state.available_agents.append(agent_name)
+
+elif app_mode == "Tool Box":
+    # Tool Box
+    st.header("AI-Powered Tools")
+
+    # Chat Interface
+    st.subheader("Chat with CodeCraft")
+    chat_input = st.text_area("Enter your message:")
+    if st.button("Send"):
+        if chat_input.startswith("@"):
+            agent_name = chat_input.split(" ")[0][1:]  # Extract agent_name from @agent_name
+            chat_input = " ".join(chat_input.split(" ")[1:])  # Remove agent_name from input
+            chat_response = chat_interface_with_agent(chat_input, agent_name)
+            st.session_state.chat_history.append((chat_input, chat_response))
+            st.write(f"{agent_name}: {chat_response}")
+
+    # Code Generation
+    st.subheader("Generate Code")
+    code_idea = st.text_area("Enter your code idea:")
+    if st.button("Generate Code"):
+        generated_code = generate_code(code_idea)
+        st.code(generated_code, language='python')
+
+    # Code Translation
+    st.subheader("Translate Code")
+    code = st.text_area("Enter your code:")
+    input_language = st.selectbox("Input Language", ["Python", "JavaScript"])
+    output_language = st.selectbox("Output Language", ["Python", "JavaScript"])
+    if st.button("Translate Code"):
+        translated_code = translate_code(code, input_language, output_language)
+        st.code(translated_code, language=output_language.lower())
+
+    # Summarization
+    st.subheader("Summarize Text")
+    text_to_summarize = st.text_area("Enter text to summarize:")
+    if st.button("Summarize"):
+        summary = summarize_text(text_to_summarize)
+        st.write(summary)
+
+    # Sentiment Analysis
+    st.subheader("Sentiment Analysis")
+    text_to_analyze = st.text_area("Enter text for sentiment analysis:")
+    if st.button("Analyze Sentiment"):
+        sentiment = sentiment_analysis(text_to_analyze)
+        st.write(sentiment)
+
+elif app_mode == "Workspace Chat App":
+    # Workspace Chat App
+    st.header("Workspace Chat App")
+
+    # Project Management
+    st.subheader("Manage Projects")
+    project_name = st.text_input("Enter project name:")
+    if st.button("Create Project"):
+        project_message = workspace_interface(project_name)
+        st.success(project_message)
+
+    # Add Code to Project
+    st.subheader("Add Code to Project")
+    project_name_for_code = st.text_input("Enter project name for code:")
+    code_content = st.text_area("Enter code content:")
+    file_name = st.text_input("Enter file name:")
+    if st.button("Add Code"):
+        add_code_message = add_code_to_workspace(project_name_for_code, code_content, file_name)
+        st.success(add_code_message)
+
+    # Terminal Interface
+    st.subheader("Terminal Interface")
+    terminal_command = st.text_area("Enter terminal command:")
+    project_name_for_terminal = st.text_input("Enter project name for terminal (optional):")
+    if st.button("Run Command"):
+        terminal_output = terminal_interface(terminal_command, project_name_for_terminal)
+        st.text(terminal_output)
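
Note: the commit does not say how to run this revision locally; the following is inferred from the imports and the st.secrets lookup above, not specified by the commit:

# Hypothetical local setup
#   pip install streamlit transformers torch "openai<1.0"
#   put OPENAI_API_KEY in .streamlit/secrets.toml (read via st.secrets)
#   streamlit run app.py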