from gradio_client import Client
from huggingface_hub import InferenceClient
import gradio as gr
import random
ss_client = Client("https://omnibus-html-image-current-tab.hf.space/")
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
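# ss_client calls a companion Space that renders HTML chat blocks to an image (used by get_screenshot below);
# client streams text generation from the Mixtral-8x7B-Instruct inference endpoint.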
def format_prompt(message, history):
    # Build a Mixtral-instruct style prompt: each prior turn is wrapped in
    # [INST] ... [/INST] followed by the bot response, then the new message is appended.
    prompt = "<s>"
    if history:
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
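# For example, format_prompt("Hi", [("Hello", "Hey there")]) returns:
#   "<s>[INST] Hello [/INST] Hey there</s> [INST] Hi [/INST]"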
def chat_inf(system_prompt, prompt, history):
    # Stream a completion from Mixtral and yield the growing (prompt, output) pair
    # so the Gradio Chatbot updates token by token.
    if not history:
        history = []
    seed = random.randint(1, 1111111111111111)
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=10480,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield [(prompt, output)]
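# chat_inf is a generator, which is what lets the Chatbot refresh as tokens arrive.
# Hypothetical standalone use outside the UI:
#   for partial in chat_inf("You are terse.", "Say hello", []):
#       print(partial[-1][1])  # bot reply accumulated so far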
def get_screenshot(chat: list, height=5000, width=600, chatblock=[1], theme="light", wait=3000, header=True):
    # Render the current chat to an image via the screenshot Space.
    result = ss_client.predict(chat, height, width, chatblock, header, theme, wait, api_name="/run_script")
    # Inputs expected by api_name="/run_script":
    #   str in 'Chat: [('user','bot'),('user','bot')]' Textbox component
    #   float in 'Height' Number component
    #   float in 'Width' Number component
    #   List[Literal['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20']] in 'Chatblocks' Checkboxgroup component
    #   bool in 'Show Header' Checkbox component
    #   Literal['light', 'dark'] in 'Theme' Radio component
    #   float (numeric value between 1 and 10000) in 'Wait time' Slider component
    # Return types:
    #   filepath representing output in 'value_25' Image component
    #   str representing output in 'value_20' Html component
    #   List[Dict(image: filepath, caption: str | None)] representing output in 'value_24' Gallery component
    #   filepath representing output in 'value_23' Image component
    out = f'https://omnibus-html-image-current-tab.hf.space/file={result[0]}'
    print(out)
    return out
# Example chat payload for manually testing get_screenshot:
chat = [('user', 'bot'), ('user', 'bot')]
# get_screenshot(chat=[('user','bot'),('user','bot')])
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column(scale=3):
            with gr.Group():
                chat_b = gr.Chatbot()
                with gr.Row():
                    with gr.Column(scale=3):
                        inp = gr.Textbox(label="Prompt")
                        sys_inp = gr.Textbox(label="System Prompt (optional)")
                        btn = gr.Button("Chat")
                    with gr.Column(scale=1):
                        with gr.Group():
                            # Stop / Clear buttons (not wired to any event in this script)
                            stop_btn = gr.Button("Stop")
                            clear_btn = gr.Button("Clear")
        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    im_height = gr.Number(label="Height", value=5000)
                    im_width = gr.Number(label="Width", value=500)
                    wait_time = gr.Number(label="Wait Time", value=3000)
                theme = gr.Radio(label="Theme", choices=["light", "dark"])
                # The screenshot Space accepts chat blocks 1-20 (see get_screenshot notes).
                chatblock = gr.Dropdown(label="Chatblocks", choices=[c for c in range(1, 21)])
                im_btn = gr.Button("Screenshot")
                img = gr.Image(type='filepath')

    btn.click(chat_inf, [sys_inp, inp, chat_b], chat_b)
    im_btn.click(get_screenshot, [chat_b, im_height, im_width, chatblock, theme, wait_time], img)
    # app.load(get_screenshot, inp, img)
app.launch()