import gradio as gr
from huggingface_hub import InferenceClient
from typing import Dict, Iterator, List, Tuple

# Client for the hosted model, served through the Hugging Face Inference API.
client = InferenceClient("AuriLab/gpt-bi-instruct-cesar")
def format_messages(
    history: List[Tuple[str, str]],
    system_message: str,
    user_message: str,
) -> List[Dict[str, str]]:
    """Flatten Gradio's (user, assistant) history tuples into OpenAI-style chat messages."""
    messages = [{"role": "system", "content": system_message}]
    messages.extend([
        {"role": "user" if i % 2 == 0 else "assistant", "content": msg}
        for turn in history
        for i, msg in enumerate(turn)
        if msg  # skip empty slots, e.g. an assistant reply that never arrived
    ])
    messages.append({"role": "user", "content": user_message})
    return messages
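# For illustration, with history=[("Hi", "Hello!")] and user_message="How are you?",
# format_messages produces (tracing the code above):
#
#   [{"role": "system", "content": system_message},
#    {"role": "user", "content": "Hi"},
#    {"role": "assistant", "content": "Hello!"},
#    {"role": "user", "content": "How are you?"}]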
def respond(
    message: str,
    history: List[Tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
) -> Iterator[str]:
    """Stream the model's reply, yielding the accumulated text after each chunk."""
    messages = format_messages(history, system_message, message)
    response = ""
    for msg in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        # OpenAI-style anti-repetition penalties (chat_completion does not take
        # text_generation's repetition_penalty parameter).
        presence_penalty=0.5,   # penalize tokens that have already appeared
        frequency_penalty=0.5,  # penalize tokens by how often they recur
    ):
        token = msg.choices[0].delta.content or ""  # delta.content can be None
        response += token
        yield response
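# A minimal smoke test of the streaming generator (a sketch: it assumes the
# Inference API endpoint is reachable and the model is not sleeping):
#
#   for partial in respond("Hello!", [], "You are a friendly Chatbot.", 128, 0.7, 0.95):
#       print(partial, end="\r")  # each yield is the full response so far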
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
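# To run this Space locally (a sketch; the dependency list is an assumption,
# inferred from the imports rather than from a pinned requirements.txt):
#
#   pip install gradio huggingface_hub
#   python app.py
#
# demo.launch() serves the UI at http://127.0.0.1:7860 by default.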