aidevhund committed · verified
Commit c181c4d · 1 Parent(s): cb13656

Update app.py

Files changed (1): app.py (+62 -34)
app.py CHANGED
@@ -3,41 +3,40 @@ from openai import OpenAI
 import os
 
 ACCESS_TOKEN = os.getenv("HF_TOKEN")
+print("Access token loaded.")
 TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1/",
     api_key=ACCESS_TOKEN,
 )
+print("OpenAI client initialized.")
 
 # Search Tool
 from langchain_community.tools.tavily_search import TavilySearchResults
 search_tool = TavilySearchResults(tavily_api_key=TAVILY_API_KEY)
-
 # Define a comprehensive system prompt
 SYSTEM_PROMPT = """
 You are a highly knowledgeable and reliable Crypto Trading Advisor and Analyzer.
 Your goal is to assist users in understanding, analyzing, and making informed decisions about cryptocurrency trading.
-You provide accurate, concise, and actionable advice based on real-time data, historical trends, and established best practices.
-If you are provided with relevant search results, incorporate that information into your analysis.
+You provide accurate, concise, and actionable advice based on real-time data, historical trends, and established best practices.
 """
 
-# Fixed settings for LLM
-MAX_TOKENS = 512
-TEMPERATURE = 0.7
-TOP_P = 0.95
-FREQUENCY_PENALTY = 0.0
-SEED = -1  # Use None if random seed is preferred
-
 # Function to handle chatbot responses
-def respond(message, history: list[tuple[str, str]]):
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    max_tokens,
+    temperature,
+    top_p,
+    frequency_penalty,
+    seed
+):
     print(f"Received message: {message}")
     print(f"History: {history}")
 
     # Convert seed to None if -1 (meaning random)
-    if SEED == -1:
+    if seed == -1:
         seed = None
-    else:
-        seed = SEED
 
     messages = [{"role": "system", "content": SYSTEM_PROMPT}]
     print("System prompt added to messages.")
@@ -54,31 +53,17 @@ def respond(message, history: list[tuple[str, str]]):
     # Append the latest user message
     messages.append({"role": "user", "content": message})
 
-    try:
-        # Correct method to get relevant results based on the message
-        search_results = search_tool.query(message)
-        print(f"Search Results: {search_results}")  # Debugging line
-    except Exception as e:
-        print(f"Error during search: {str(e)}")
-        search_results = []
-
-    if search_results:
-        search_results_text = "Here are the search results related to the bitcoin price:\n"
-        for result in search_results:
-            search_results_text += f"- {result['title']}: {result['url']}\n"
-        messages.append({"role": "assistant", "content": search_results_text})
-
     # Start response generation
     response = ""
     print("Sending request to OpenAI API.")
 
     for message_chunk in client.chat.completions.create(
         model="meta-llama/Llama-3.3-70B-Instruct",
-        max_tokens=MAX_TOKENS,
+        max_tokens=max_tokens,
        stream=True,
-        temperature=TEMPERATURE,
-        top_p=TOP_P,
-        frequency_penalty=FREQUENCY_PENALTY,
+        temperature=temperature,
+        top_p=top_p,
+        frequency_penalty=frequency_penalty,
         seed=seed,
         messages=messages,
     ):
@@ -88,14 +73,57 @@ def respond(message, history: list[tuple[str, str]]):
 
     print("Completed response generation.")
 
-# Gradio UI (no sliders for the user, settings are fixed in code)
+# Gradio UI
 chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Ask about crypto trading or analysis.", likeable=True)
 
+max_tokens_slider = gr.Slider(
+    minimum=1,
+    maximum=4096,
+    value=512,
+    step=1,
+    label="Max new tokens"
+)
+temperature_slider = gr.Slider(
+    minimum=0.1,
+    maximum=4.0,
+    value=0.7,
+    step=0.1,
+    label="Temperature"
+)
+top_p_slider = gr.Slider(
+    minimum=0.1,
+    maximum=1.0,
+    value=0.95,
+    step=0.05,
+    label="Top-P"
+)
+frequency_penalty_slider = gr.Slider(
+    minimum=-2.0,
+    maximum=2.0,
+    value=0.0,
+    step=0.1,
+    label="Frequency Penalty"
+)
+seed_slider = gr.Slider(
+    minimum=-1,
+    maximum=65535,
+    value=-1,
+    step=1,
+    label="Seed (-1 for random)"
+)
+
 demo = gr.ChatInterface(
     fn=respond,
+    additional_inputs=[
+        max_tokens_slider,
+        temperature_slider,
+        top_p_slider,
+        frequency_penalty_slider,
+        seed_slider,
+    ],
     fill_height=True,
     chatbot=chatbot,
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
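
Side note on the search block this commit drops: TavilySearchResults has no query() method, so the removed search_tool.query(message) call would fail at runtime with an AttributeError. If the web-search step is reintroduced later, the tool would normally be called through the standard LangChain tool interface (invoke() or run()). A minimal sketch, assuming Tavily returns a list of dicts with url and content fields; the helper name search_context is illustrative and not part of the app:

import os
from langchain_community.tools.tavily_search import TavilySearchResults

search_tool = TavilySearchResults(tavily_api_key=os.getenv("TAVILY_API_KEY"))

def search_context(query: str) -> str:
    """Return a short bullet list of Tavily hits, or an empty string on failure."""
    try:
        # invoke() is the supported tool entry point; .query() does not exist
        results = search_tool.invoke(query)
    except Exception as e:
        print(f"Error during search: {e}")
        return ""
    if not isinstance(results, list):
        # The tool may return an error string instead of a result list
        return ""
    # The url/content keys are an assumption about Tavily's result payload
    return "\n".join(f"- {r.get('url', '')}: {r.get('content', '')[:200]}" for r in results)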
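
On the Gradio side, the order of additional_inputs matters: gr.ChatInterface passes each component's value as an extra positional argument to fn after (message, history), which is why the new respond() signature lists max_tokens, temperature, top_p, frequency_penalty, seed in the same order as the sliders. A condensed sketch of that wiring, with a stand-in echo handler in place of the real model call:

import gradio as gr

# Stand-in handler: parameters line up positionally with additional_inputs below
def respond(message, history, max_tokens, temperature, top_p, frequency_penalty, seed):
    yield f"[max_tokens={max_tokens}, temperature={temperature}, seed={seed}] {message}"

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Slider(1, 4096, value=512, step=1, label="Max new tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P"),
        gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty"),
        gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)"),
    ],
)

if __name__ == "__main__":
    demo.launch()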