diegocp01 committed
Commit c62fb0c · verified · 1 Parent(s): da0e5ea

Update app.py

Files changed (1)
  1. app.py +20 -66
app.py CHANGED
@@ -20,82 +20,37 @@ openai.api_key = os.getenv('OPENAI_API_KEY')
 # Below is the new deadline calculator tool
 
 @tool
-def multi_perspective_brainstorming(query: str) -> str:
-    """A tool that generates and ranks creative ideas by simulating a brainstorming swarm of AI instances.
+def chatgpt_conversation(prompt: str) -> str:
+    """A tool that interacts with the OpenAI API to simulate a conversation with a dynamic prompt.
 
     Args:
-        query: An open-ended query to brainstorm (e.g., 'Generate marketing ideas for a coffee shop').
+        prompt: The message to ChatGPT.
     Returns:
-        A prioritized list of the top ideas synthesized from multiple perspectives.
+        The assistant's response to the prompt.
     """
-    # Define brainstorming perspectives
-    perspectives = [
-        {"focus": "Social Media", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on social media: {query}"},
-        {"focus": "Loyalty Programs", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on loyalty programs: {query}"},
-        {"focus": "Sustainability", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on sustainability: {query}"},
-    ]
-
-    # Collect ideas from each perspective
-    all_ideas = []
-    for perspective in perspectives:
-        response = openai.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
-                {"role": "user", "content": perspective["prompt"]}
-            ],
-        )
-        ideas = response.choices[0].message.content.split("\n")  # Assume ideas are newline-separated
-        all_ideas.extend([f"{perspective['focus']}: {idea.strip()}" for idea in ideas if idea.strip()])
-
-    # Rank the ideas by simulating a consensus
-    ranking_prompt = "From the following list of ideas, rank the top 5 based on creativity, feasibility, and impact for '" + query + "':\n" + "\n".join(all_ideas)
-    ranked_response = openai.chat.completions.create(
+    response = openai.chat.completions.create(
         model="gpt-4o-mini",
         messages=[
-            {"role": "user", "content": ranking_prompt}
+            {"role": "user", "content": prompt}
         ],
     )
-
-    return ranked_response.choices[0].message.content
+    return response.choices[0].message.content
 
 
 @tool
-def realtime_collaborative_assistant(query: str) -> str:
-    """A tool that simulates a roundtable discussion with AI experts to provide a well-rounded response.
-
+def get_current_time_in_timezone(timezone: str) -> str:
+    """A tool that fetches the current local time in a specified timezone.
     Args:
-        query: The user’s question or topic to discuss (e.g., 'How can I improve my website’s UX?').
-    Returns:
-        A synthesized response combining insights from multiple AI perspectives.
+        timezone: A string representing a valid timezone (e.g., 'America/New_York').
     """
-    # Define expert personas with distinct roles
-    experts = [
-        {"role": "UX Designer", "prompt": f"As a UX designer, provide practical suggestions for: {query}"},
-        {"role": "Marketing Strategist", "prompt": f"As a marketing strategist, suggest how to approach: {query}"},
-        {"role": "Tech Analyst", "prompt": f"As a tech analyst, offer technical insights on: {query}"},
-    ]
-
-    # Collect responses from each AI expert
-    expert_opinions = []
-    for expert in experts:
-        response = openai.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
-                {"role": "user", "content": expert["prompt"]}
-            ],
-        )
-        expert_opinions.append(f"{expert['role']}: {response.choices[0].message.content}")
-
-    # Synthesize the responses into a cohesive answer
-    synthesis_prompt = "Synthesize the following expert opinions into a concise, well-rounded response to the query '" + query + "':\n" + "\n".join(expert_opinions)
-    final_response = openai.chat.completions.create(
-        model="gpt-4o-mini",
-        messages=[
-            {"role": "user", "content": synthesis_prompt}
-        ],
-    )
-
-    return final_response.choices[0].message.content
+    try:
+        # Create timezone object
+        tz = pytz.timezone(timezone)
+        # Get current time in that timezone
+        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
+        return f"The current local time in {timezone} is: {local_time}"
+    except Exception as e:
+        return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
 final_answer = FinalAnswerTool()
@@ -119,7 +74,7 @@ with open("prompts.yaml", 'r') as stream:
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer, multi_perspective_brainstorming, realtime_collaborative_assistant], ## add your tools here (don't remove final answer)
+    tools=[final_answer, get_current_time_in_timezone, chatgpt_conversation], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
@@ -130,5 +85,4 @@ agent = CodeAgent(
 )
 
 
-GradioUI(agent).launch()
-
+GradioUI(agent).launch()
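
For reference, the timezone lookup inside get_current_time_in_timezone is plain pytz plus datetime. A minimal standalone sketch of that pattern (the timezone name below is only an example and is not part of this commit):

import datetime
import pytz

# Look up a named timezone and format the current local time there.
# pytz.timezone raises pytz.exceptions.UnknownTimeZoneError for unrecognized
# names, which the tool's broad except clause turns into an error string.
tz = pytz.timezone("America/New_York")
print(datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S"))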