aidevhund committed
Commit 8b66151 · verified · 1 parent: a7c9626

Update app.py

Files changed (1)
app.py +51 -75
app.py CHANGED
@@ -1,105 +1,79 @@
 import gradio as gr
 from openai import OpenAI
 import os
-import litellm
-from litellm import completion
-from crewai import Agent, Task, Crew, Process
-from langchain_community.tools.tavily_search import TavilySearchResults
 
-# Environment Variables
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
 TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
-
-# OpenAI Client Initialization
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
 )
 
-# Search Tool Initialization
+# Search Tool
+from langchain_community.tools.tavily_search import TavilySearchResults
 search_tool = TavilySearchResults(tavily_api_key=TAVILY_API_KEY)
 
-# System Prompt
+# Define a comprehensive system prompt
 SYSTEM_PROMPT = """
 You are a highly knowledgeable and reliable Crypto Trading Advisor and Analyzer.
 Your goal is to assist users in understanding, analyzing, and making informed decisions about cryptocurrency trading.
 You provide accurate, concise, and actionable advice based on real-time data, historical trends, and established best practices.
 """
 
-# CrewAI Integration
-llm = client  # Using the OpenAI client for CrewAI agents
-
-def run_crypto_crew(topic):
-    researcher = Agent(
-        role='Market Researcher',
-        goal=f'Uncover emerging trends and investment opportunities in the cryptocurrency market. Focus on the topic: {topic}.',
-        backstory='Identify groundbreaking trends and actionable insights.',
-        verbose=True,
-        tools=[search_tool],
-        allow_delegation=False,
-        llm=llm,
-        max_iter=3,
-        max_rpm=10,
-    )
-
-    analyst = Agent(
-        role='Investment Analyst',
-        goal=f'Analyze cryptocurrency market data to extract actionable insights. Focus on the topic: {topic}.',
-        backstory='Draw meaningful conclusions from cryptocurrency market data.',
-        verbose=True,
-        allow_delegation=False,
-        llm=llm,
-    )
-
-    research_task = Task(
-        description=f'Explore the internet to identify trends and investment opportunities. Topic: {topic}.',
-        agent=researcher,
-        expected_output='Detailed summary of research results.'
-    )
-
-    analyst_task = Task(
-        description=f'Analyze the market data to compile a concise report. Topic: {topic}.',
-        agent=analyst,
-        expected_output='Finalized version of the analysis report.'
-    )
-
-    crypto_crew = Crew(
-        agents=[researcher, analyst],
-        tasks=[research_task, analyst_task],
-        process=Process.sequential
-    )
-
-    result = crypto_crew.kickoff()
-    return result.raw
-
-# Chatbot Response Function
-def respond(message, history):
-    max_tokens = 512
-    temperature = 0.7
-    top_p = 0.95
-    frequency_penalty = 0.0
-    seed = None
-
-    # Fetch the analysis results from CrewAI
-    crew_response = run_crypto_crew(message)
-
-    # Blend them with the LLM output to produce a more comprehensive answer
+# Fixed settings for LLM
+MAX_TOKENS = 512
+TEMPERATURE = 0.3
+TOP_P = 0.95
+FREQUENCY_PENALTY = 0.0
+SEED = -1  # Use None if random seed is preferred
+
+# Function to handle chatbot responses
+def respond(message, history: list[tuple[str, str]]):
+    print(f"Received message: {message}")
+    print(f"History: {history}")
+
+    # Convert seed to None if -1 (meaning random)
+    if SEED == -1:
+        seed = None
+    else:
+        seed = SEED
+
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
-    for user_part, assistant_part in history:
+    print("System prompt added to messages.")
+
+    # Add conversation history to the context
+    for val in history:
+        user_part = val[0]
+        assistant_part = val[1]
         if user_part:
             messages.append({"role": "user", "content": user_part})
         if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
-    messages.append({"role": "user", "content": f"{message}\n\nCrewAI Analysis:\n{crew_response}"})
 
+    # Append the latest user message
+    messages.append({"role": "user", "content": message})
+
+    # Use the search tool to get relevant results based on the message
+    search_results = search_tool.search(message)
+    if search_results:
+        # Combine search results into a string to send to the model
+        search_results_text = "Here are the search results:\n"
+        for result in search_results:
+            search_results_text += f"- {result['title']}: {result['url']}\n"
+        # Add the search results to the messages to be used by LLM
+        messages.append({"role": "assistant", "content": search_results_text})
+
+    # Start response generation
     response = ""
+    print("Sending request to OpenAI API.")
+
     for message_chunk in client.chat.completions.create(
         model="meta-llama/Llama-3.3-70B-Instruct",
-        max_tokens=max_tokens,
+        max_tokens=MAX_TOKENS,
         stream=True,
-        temperature=temperature,
-        top_p=top_p,
-        frequency_penalty=frequency_penalty,
+        temperature=TEMPERATURE,
+        top_p=TOP_P,
+        frequency_penalty=FREQUENCY_PENALTY,
        seed=seed,
        messages=messages,
    ):
@@ -107,8 +81,10 @@ def respond(message, history):
         response += token_text
         yield response
 
-# Gradio UI
-chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Ask about crypto trading or analysis.")
+    print("Completed response generation.")
+
+# Gradio UI (no sliders for the user, settings are fixed in code)
+chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Ask about crypto trading or analysis.", likeable=True)
 
 demo = gr.ChatInterface(
     fn=respond,
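One caveat on the new search step: TavilySearchResults from langchain_community is a LangChain tool, whose public interface is run()/invoke() rather than a search() method, so the committed call search_tool.search(message) may fail with an AttributeError at runtime. The sketch below shows how that block could be written against the standard tool interface; the helper name format_search_results and the defensive key lookups are assumptions, not part of the commit, and the exact keys in each result dict ("url", "content", sometimes "title") depend on how the tool is configured.

from langchain_community.tools.tavily_search import TavilySearchResults

def format_search_results(search_tool: TavilySearchResults, query: str) -> str:
    # Hypothetical helper: call the tool through the standard LangChain
    # interface and format its results for the chat context.
    results = search_tool.invoke({"query": query})
    if not isinstance(results, list) or not results:
        return ""  # no usable results (or the tool returned an error string)
    lines = ["Here are the search results:"]
    for result in results:
        # Read keys defensively; the result schema can vary with configuration.
        title = result.get("title") or result.get("content", "")[:80]
        lines.append(f"- {title}: {result.get('url', '')}")
    return "\n".join(lines)

Inside respond(), the formatted text could then be appended the same way the commit does, but only when the returned string is non-empty.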
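The line that extracts token_text from each streamed chunk falls between the two hunks (old line 106 / new line 80) and is unchanged by this commit, so it does not appear in the diff. With an OpenAI-compatible streaming client it usually looks like the loop body sketched below, as it would appear inside respond(); the exact expression used in app.py is an assumption.

# Sketch of the streaming loop with the unchanged chunk-handling line filled
# in hypothetically; the fallback to "" covers chunks whose delta has no content.
for message_chunk in client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",
    max_tokens=MAX_TOKENS,
    stream=True,
    temperature=TEMPERATURE,
    top_p=TOP_P,
    frequency_penalty=FREQUENCY_PENALTY,
    seed=seed,
    messages=messages,
):
    token_text = message_chunk.choices[0].delta.content or ""
    response += token_text
    yield response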
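The new Gradio UI comment notes that the sampling settings are fixed in code rather than exposed as sliders, so the ChatInterface needs nothing beyond the response function and the pre-configured chatbot. The closing lines of app.py (everything past fn=respond,) fall outside the diff hunks, so the wiring below is only a plausible sketch and the launch guard is an assumption.

# Plausible closing lines; not shown in the diff above.
demo = gr.ChatInterface(
    fn=respond,
    chatbot=chatbot,  # reuse the gr.Chatbot configured earlier in the file
)

if __name__ == "__main__":
    demo.launch()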