Update app.py
app.py CHANGED
@@ -45,12 +45,12 @@ def respond(
 
     for message_chunk in client.chat.completions.create(
         model=model_to_use,
-        max_tokens=
+        max_tokens=2048,
         stream=True,
-        temperature=
-        top_p=
-        frequency_penalty=
-        seed
+        temperature=0.7,
+        top_p=0.95,
+        frequency_penalty=0.0,
+        seed=-1,
         messages=messages,
     ):
         token_text = message_chunk.choices[0].delta.content
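For context, a minimal sketch of how the streaming loop reads after this hunk. It assumes `client` is an OpenAI-compatible chat client (e.g. huggingface_hub.InferenceClient) and that `respond` is a generator yielding partial text to the Gradio chatbot; the token handling, function signature, and message construction are assumptions, and only the hardcoded sampling values come from the diff.

import os
from huggingface_hub import InferenceClient

# Sketch only: token handling and the respond() signature are assumptions,
# not part of this commit. The hardcoded sampling values mirror the new lines above.
client = InferenceClient(token=os.getenv("HF_TOKEN"))

def respond(message, history, model_to_use="meta-llama/Llama-3.2-3B-Instruct"):
    # Simplified: the real app also folds a system prompt and chat history into `messages`.
    messages = [{"role": "user", "content": message}]
    response = ""
    for message_chunk in client.chat.completions.create(
        model=model_to_use,
        max_tokens=2048,          # previously bound to a UI control
        stream=True,
        temperature=0.7,
        top_p=0.95,
        frequency_penalty=0.0,
        seed=-1,                  # -1 was the removed slider's "random" sentinel
        messages=messages,
    ):
        token_text = message_chunk.choices[0].delta.content
        if token_text:            # delta.content can be None on some stream chunks
            response += token_text
            yield response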
@@ -61,11 +61,7 @@ chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is
 
 system_message_box = gr.Label(value="You can select Max Tokens, Temperature, Top-P, Seed")
 
-
-temperature_slider = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
-top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
-frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
-seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
+
 
 custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
 
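And a minimal sketch of the simplified UI after the second hunk: the Temperature / Top-P / Frequency Penalty / Seed sliders are gone, so only the label and the model textbox remain (note the label text still advertises the removed controls). How the components are wired into the chat function is not shown in the diff; the gr.ChatInterface wiring below, the placeholder text, and the reuse of the `respond` generator from the sketch above are assumptions.

import gradio as gr

# Sketch only: layout and wiring are assumptions; the components themselves
# match the post-commit lines above.
chatbot = gr.Chatbot(height=600, show_copy_button=True,
                     placeholder="ChatGPT is ...")  # placeholder text truncated in the diff view

# Still present, though the controls it describes were removed in this commit;
# its placement in the layout is not shown in the diff.
system_message_box = gr.Label(value="You can select Max Tokens, Temperature, Top-P, Seed")

custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")

demo = gr.ChatInterface(
    fn=respond,                            # the streaming generator sketched above
    chatbot=chatbot,
    additional_inputs=[custom_model_box],  # sliders no longer passed as inputs
)

if __name__ == "__main__":
    demo.launch()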