from huggingface_hub import InferenceClient
import gradio as gr

# Client for the hosted Mistral-7B-Instruct model served via the
# Hugging Face Inference API.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1"
)
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from the chat history and new message."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
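# Illustrative example (not part of the original file): with one prior
# exchange in `history`, format_prompt produces the Mistral [INST] layout:
#   format_prompt("How big is it?", [("What is Mistral 7B?", "A 7B LLM.")])
#   -> "<s>[INST] What is Mistral 7B? [/INST] A 7B LLM.</s> [INST] How big is it? [/INST]"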
def generate(
    prompt, history, max_tokens=2000,
):
    # Sampling controls (temperature, top_p, repetition_penalty, do_sample,
    # seed) can also be added here. Note the API parameter is
    # `max_new_tokens`, not `max_tokens`.
    generate_kwargs = dict(
        max_new_tokens=max_tokens,
    )
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs,
        stream=True, details=True, return_full_text=False,
    )
    # Accumulate tokens as they arrive and yield the partial reply so the
    # chat UI updates incrementally.
    output = ""
    for response in stream:
        output += response.token.text
        yield output
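# Standalone usage sketch (assumes the Inference API is reachable and any
# required HF token is configured in the environment):
#   for partial in generate("What is Gradio?", history=[]):
#       print(partial)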
additional_inputs = [
    gr.Slider(
        label="Max tokens",
        value=2000,
        minimum=0,
        maximum=2048,
        step=64,
        interactive=True,
        info="Maximum number of new tokens to generate",
    ),
]
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(
        show_label=False,
        show_share_button=False,
        show_copy_button=True,
        likeable=True,
        layout="panel",
    ),
    additional_inputs=additional_inputs,
    title="Mistral 7B",
).launch(show_api=False)
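# To run locally (a sketch; assumes gradio and huggingface_hub are installed,
# and that this file is saved as app.py, an illustrative filename):
#   python app.py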