Update app.py
app.py
CHANGED
@@ -33,7 +33,6 @@ def generate_text(prompt, max_length, temperature):
 
     return tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
 
-# Custom CSS
 css = """
 body {
     background: linear-gradient(135deg, #f5f7fa, #c3cfe2);
@@ -84,63 +83,39 @@ body {
 .llama-image:hover .llama-description {
     opacity: 1;
 }
-.artifact.large {
-    width: 300px;
-    height: 300px;
-    top: -50px;
-    left: -150px;
-}
-.artifact.medium {
-    width: 200px;
-    height: 200px;
-    bottom: -50px;
-    right: -100px;
-}
+.gradio-container {
+    max-width: 900px !important;
+    margin: auto;
+    padding-top: 1.5rem;
+}
+.container {
+    background-color: #ffffff;
+    border-radius: 10px;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+    padding: 20px;
+    margin-top: 20px;
+}
 """
 
-<div id="custom-header">
-    <img src="https://cdn-uploads.huggingface.co/production/uploads/64c75c1237333ccfef30a602/tmOlbERGKP7JSODa6T06J.jpeg" alt="Llama">
-    <div class="llama-description">Llama-3.1-Storm-8B Model</div>
-    </div>
-</div>
-"""
-
-# Create Gradio interface
-iface = gr.Interface(
-    fn=generate_text,
-    inputs=[
-        gr.Textbox(lines=5, label="Prompt"),
-        gr.Slider(minimum=1, maximum=500, value=128, step=1, label="Max Length"),
-        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
-    ],
-    outputs=gr.Textbox(lines=10, label="Generated Text"),
-    title="Llama-3.1-Storm-8B Text Generation",
-    description="Enter a prompt to generate text using the Llama-3.1-Storm-8B model.",
-    css=css,
-    article=custom_html
-)
-
-# Launch the app
+with gr.Blocks(css=css) as iface:
+    gr.HTML("""
+        <div id="custom-header">
+            <h1>Llama-3.1-Storm-8B Text Generation</h1>
+            <p>Generate text using the powerful Llama-3.1-Storm-8B model. Enter a prompt and let the AI create!</p>
+            <div class="llama-image">
+                <img src="https://cdn-uploads.huggingface.co/production/uploads/64c75c1237333ccfef30a602/tmOlbERGKP7JSODa6T06J.jpeg" alt="Llama">
+                <div class="llama-description">Llama-3.1-Storm-8B Model</div>
+            </div>
+        </div>
+    """)
+
+    with gr.Column(elem_classes="container"):
+        prompt = gr.Textbox(lines=5, label="Prompt")
+        max_length = gr.Slider(minimum=1, maximum=500, value=128, step=1, label="Max Length")
+        temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
+        submit_btn = gr.Button("Generate", variant="primary")
+        output = gr.Textbox(lines=10, label="Generated Text")
+
+    submit_btn.click(generate_text, inputs=[prompt, max_length, temperature], outputs=output)
 
 iface.launch()
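The commit only touches the CSS string and the Gradio wiring: the old gr.Interface call (with its custom_html article) is replaced by a gr.Blocks layout whose button is wired to generate_text via submit_btn.click. The generate_text function itself and the model loading above line 33 are outside the diff. As context, here is a minimal sketch of what that earlier part of app.py typically looks like; the model repo id, dtype, device handling, and the mapping of the "Max Length" slider to max_new_tokens are assumptions, not code shown in this commit.

# Sketch of the portion of app.py above the diff (assumed, not part of this commit)
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "akjindal53244/Llama-3.1-Storm-8B"  # assumed model repo

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

def generate_text(prompt, max_length, temperature):
    # Tokenize the prompt and move it to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=int(max_length),   # "Max Length" slider; mapping is an assumption
            temperature=float(temperature),   # "Temperature" slider
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens (matches line 34 shown in the diff)
    return tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)

Moving from gr.Interface to gr.Blocks is mainly a layout choice: Blocks lets the Space place the header HTML, inputs, and output inside styled containers and wire the button explicitly, instead of relying on Interface's fixed title/description/article slots.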