|
import gradio as gr |
|
from openai import OpenAI |
|
import os |
|
|
|
# Hugging Face access token used to authenticate against the serverless
# Inference API. Expected to be provided via the environment (e.g. a
# Spaces secret named HF_TOKEN).
ACCESS_TOKEN = os.getenv("HF_TOKEN")

if ACCESS_TOKEN:
    print("Access token loaded.")
else:
    # Warn loudly at startup instead of failing later with an opaque
    # 401 on the first chat request.
    print("WARNING: HF_TOKEN environment variable is not set; API requests will fail to authenticate.")

# OpenAI-compatible client pointed at the Hugging Face Inference API
# endpoint rather than api.openai.com.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)

print("OpenAI client initialized.")
|
|
|
def respond(
    message,
    history: list[tuple[str, str]],
):
    """Stream a chat completion for *message* given the prior conversation.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Previous turns as ``(user, assistant)`` pairs, as supplied by
        ``gr.ChatInterface`` in tuple format. Either element may be
        empty/None for an in-progress turn.

    Yields
    ------
    str
        The accumulated response text so far — Gradio's streaming
        convention is to yield the full text on every token.
    """
    print(f"Received message: {message}")
    print(f"History: {history}")

    system_message = "You are a cryptocurrency trading assistant and market analyst. Your role is to provide users with data-driven insights, technical analysis (RSI, MACD, Bollinger Bands, Moving Averages, Fibonacci retracements, volume analysis, and price action), and investment advice tailored to their risk tolerance. Focus on actionable information, such as market conditions, key indicators, and investment strategies. Avoid speculation and provide clear, concise, and unbiased recommendations based on current data."

    # Fixed sampling parameters (no UI controls are exposed for these).
    max_tokens = 512
    temperature = 0.7
    top_p = 0.95
    frequency_penalty = 0.0
    seed = -1

    # -1 is the conventional "no fixed seed" sentinel; the API expects None.
    if seed == -1:
        seed = None

    messages = [{"role": "system", "content": system_message}]
    print("Initial messages array constructed.")

    # Replay prior turns so the model has conversational context; skip
    # empty halves (e.g. a turn whose assistant reply never arrived).
    for user_part, assistant_part in history:
        if user_part:
            messages.append({"role": "user", "content": user_part})
            print(f"Added user message to context: {user_part}")
        if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
            print(f"Added assistant message to context: {assistant_part}")

    messages.append({"role": "user", "content": message})
    print("Latest user message appended.")

    model_to_use = "meta-llama/Llama-3.3-70B-Instruct"
    print(f"Model selected for inference: {model_to_use}")

    response = ""
    print("Sending request to OpenAI API.")

    for message_chunk in client.chat.completions.create(
        model=model_to_use,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        seed=seed,
        messages=messages,
    ):
        # Some stream chunks carry no choices, and the final chunk's
        # delta.content is None; without these guards the original code
        # raised TypeError on `response += None` at end of stream.
        if not message_chunk.choices:
            continue
        token_text = message_chunk.choices[0].delta.content
        if token_text is None:
            continue
        print(f"Received token: {token_text}")
        response += token_text
        yield response

    print("Completed response generation.")
|
|
|
|
|
|
|
# Shared chatbot widget for the ChatInterface: tall panel layout with
# copy and like buttons enabled.
_chatbot_options = {
    "height": 600,
    "show_copy_button": True,
    "placeholder": "Start chatting!",
    "likeable": True,
    "layout": "panel",
}
chatbot = gr.Chatbot(**_chatbot_options)
print("Chatbot interface created.")
|
|
|
# Wire the streaming respond() generator into a full chat UI using the
# pre-built chatbot widget and a custom theme.
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[],
    fill_height=True,
    chatbot=chatbot,
    theme="Nymbo/Nymbo_Theme",
)

print("ChatInterface object created.")

# NOTE(review): the original `with demo: pass` Blocks context added no
# components and was a pure no-op, so it has been removed.
print("Gradio interface initialized.")
|
|
|
# Standard script entry point: start the Gradio web server only when this
# file is executed directly, not when it is imported as a module.
if __name__ == "__main__":

    print("Launching the demo application.")

    demo.launch()
|
|