Update app.py
app.py
CHANGED
@@ -130,7 +130,7 @@ def get_moon_info(date_time: str) -> dict:
     # Calculate Moon's ecliptic longitude
     astrometric = earth.at(t).observe(moon)
     ecliptic_lat, ecliptic_lon, distance = astrometric.ecliptic_latlon()
-
+    lon_deg = ecliptic_lon.degrees % 360
 
     # Calculate the phase angle using almanac.moon_phase
     phase = almanac.moon_phase(planets, t)
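For context, a minimal standalone sketch of the Skyfield calls this hunk touches; the ephemeris file and date below are illustrative assumptions, not taken from the app:

from skyfield import almanac
from skyfield.api import load

ts = load.timescale()
t = ts.utc(2025, 3, 1)               # assumed example date
planets = load('de421.bsp')          # assumed ephemeris file
earth, moon = planets['earth'], planets['moon']

# Geocentric ecliptic longitude of the Moon, normalized to 0-360 degrees
astrometric = earth.at(t).observe(moon)
ecliptic_lat, ecliptic_lon, distance = astrometric.ecliptic_latlon()
lon_deg = ecliptic_lon.degrees % 360

# Sun-Moon elongation: 0 deg = new moon, 180 deg = full moon
phase = almanac.moon_phase(planets, t)
print(lon_deg, phase.degrees)

almanac.moon_phase returns the elongation as an Angle, so phase.degrees runs from 0 (new) through 180 (full) and back toward 360.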
@@ -238,12 +238,12 @@ memory = SimpleMemory(
 prompt_templates = {
     "main_prompt": """
     Current state:
-    - location_provided: {memory
-    - plant: {memory
-    - root_crop: {memory
-    - location_cautions: {memory
-    - answer: {memory
-    - last_question: {memory
+    - location_provided: {memory[location_provided]}
+    - plant: {memory[plant]}
+    - root_crop: {memory[root_crop]}
+    - location_cautions: {memory[location_cautions]}
+    - answer: {memory[answer]}
+    - last_question: {memory[last_question]}
 
     User's input: {input}
 
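Note on the placeholder fix: with str.format, a field like {memory[location_provided]} indexes into the object passed as the memory keyword argument, and the key inside the brackets is read as a literal string, so no quotes are needed. A small self-contained check (the state values here are made up):

template = "Current state:\n- plant: {memory[plant]}\n- answer: {memory[answer]}\nUser's input: {input}"
state = {"plant": "tomato", "answer": None}   # stand-in for the app's memory dict
print(template.format(memory=state, input="When should I sow?"))
# Current state:
# - plant: tomato
# - answer: None
# User's input: When should I sow?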
@@ -270,14 +270,13 @@ model = HfApiModel(
 )
 image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
-# Initialize the agent
+# Initialize the agent without the 'memory' parameter
 agent = CodeAgent(
     model=model,
     tools=[final_answer, get_moon_info, get_current_time_in_timezone, get_current_time_raw, image_generation_tool],
     max_steps=10,
     verbosity_level=1,
     prompt_templates=prompt_templates,
-    memory=memory
 )
 
 # Conversation handler for multi-turn interactions
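This hunk drops the memory= keyword from the constructor; conversational state stays in the app's SimpleMemory object and is injected into the prompt each turn (next hunk). As a hedged alternative, recent smolagents releases let a single agent instance keep its own step history across turns via run(..., reset=False); whether that applies here depends on the smolagents version the Space pins:

# Alternative sketch, not what this commit does: reuse the agent's built-in
# step memory across turns instead of re-injecting state into the prompt.
# The reset= flag of run() is version-dependent in smolagents.
first = agent.run("I grow tomatoes in Oslo. When should I sow?")
follow_up = agent.run("And what about root crops?", reset=False)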
@@ -302,7 +301,15 @@ def conversation_handler(user_input, history):
     memory["location_cautions"] = ""
     memory["last_question"] = None
 
-
+    # Update the prompt with current memory state
+    current_prompt = prompt_templates["main_prompt"].format(
+        memory=memory.memory,  # Pass the memory dictionary directly
+        input=user_input
+    )
+
+    # Run the agent with the updated prompt
+    output = agent.run(current_prompt)
+
     if "Action: Ask user" in output:
         question = output.split("Question: ")[1].strip()
         if "plant" in question.lower():
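One caveat in the new flow: output.split("Question: ")[1] raises IndexError whenever the model writes "Action: Ask user" without a "Question: " marker. A hedged, defensive variant of that parsing step (the helper name is an assumption; the markers follow the diff):

def extract_question(output: str):
    """Return the text after 'Question: ', or None if the marker is missing."""
    if "Action: Ask user" not in output:
        return None
    _, sep, rest = output.partition("Question: ")
    return rest.strip() if sep else None

question = extract_question(output)
if question and "plant" in question.lower():
    ...  # handle the follow-up question as in conversation_handler above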