rwayz committed on
Commit e31868e · verified · 1 Parent(s): bd1e98a

Create app.py

Files changed (1)
  1. app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import os
+
+ # Read the Hugging Face token from the environment
+ HF_TOKEN = os.getenv("HF_TOKEN")
+
+ # Initialize the inference client
+ client = InferenceClient(
+     provider="sambanova",
+     api_key=HF_TOKEN,
+ )
+
+ def chatbot_response(user_input):
+     messages = [{"role": "user", "content": user_input}]
+
+     try:
+         completion = client.chat.completions.create(
+             model="meta-llama/Llama-3.3-70B-Instruct",
+             messages=messages,
+             max_tokens=500,
+         )
+         return completion.choices[0].message.content
+     except Exception as e:
+         return f"Error generating response: {str(e)}"
+
+ # Build the Gradio interface
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🤖 Llama-70B Chatbot - SambaNova")
+
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox(placeholder="Type your message here...")
+     btn = gr.Button("Send")
+
+     def respond(message, chat_history):
+         response = chatbot_response(message)
+         chat_history.append((message, response))
+         return "", chat_history
+
+     btn.click(respond, [msg, chatbot], [msg, chatbot])
+
+ # Run the application
+ if __name__ == "__main__":
+     demo.launch()
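A minimal local smoke test of the added file might look like the sketch below. It is not part of the commit; it assumes the file is saved as app.py in the working directory and that HF_TOKEN is exported in the environment before running.

# Hypothetical smoke test, not included in this commit.
# Assumes app.py is importable from the current directory and HF_TOKEN is set,
# e.g. `export HF_TOKEN=hf_...` beforehand.
from app import chatbot_response  # importing app builds the UI but does not call demo.launch()

print(chatbot_response("Hello! Can you introduce yourself?"))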