Update for Hugging Face Space deployment
- app.py +33 -2
- requirements.txt +10 -0
app.py
CHANGED
@@ -1,7 +1,38 @@
 import gradio as gr
-from ...
+from huggingface_hub import InferenceClient
+import os
 
-...
+def generate_diary(keywords):
+    client = InferenceClient(
+        model="nlpai-lab/kullm-polyglot-5.8b-v2",
+        token=os.environ.get("HUGGINGFACE_API_KEY")
+    )
+
+    prompt = f"""The following is a summary of what happened today. Based on this, please write a natural, moving diary entry.
+    // ... existing prompt content ...
+    """
+
+    parameters = {
+        "max_new_tokens": 768,
+        "temperature": 0.88,
+        "top_p": 0.95,
+        "repetition_penalty": 1.35,
+        "top_k": 50,
+        "do_sample": True,
+        "num_return_sequences": 1
+    }
+
+    response = client.text_generation(prompt, **parameters)
+    return response
+
+# Create the Gradio interface
+demo = gr.Interface(
+    fn=generate_diary,
+    inputs=gr.Textbox(label="Enter today's keywords"),
+    outputs=gr.Textbox(label="Generated diary"),
+    title="AI Diary Assistant",
+    description="Enter keywords and the AI will write a diary entry for you."
+)
 
 if __name__ == "__main__":
     demo.launch()
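For a quick local check of the new generation path outside Gradio, a minimal sketch (the file name and test prompt are placeholders, and HUGGINGFACE_API_KEY is assumed to be set as a Space secret or local environment variable; InferenceClient.text_generation takes the sampling options as individual keyword arguments and has no num_return_sequences parameter, so that key is left out here):

# generation_smoke_test.py - hypothetical helper, not part of this commit
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    model="nlpai-lab/kullm-polyglot-5.8b-v2",
    token=os.environ.get("HUGGINGFACE_API_KEY"),
)

# The sampling values mirror the parameters dict in the diff above,
# passed as keyword arguments to text_generation().
text = client.text_generation(
    "Keywords: a walk by the river, coffee with a friend",  # placeholder prompt
    max_new_tokens=768,
    temperature=0.88,
    top_p=0.95,
    repetition_penalty=1.35,
    top_k=50,
    do_sample=True,
)
print(text)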
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+flask==2.3.3
+flask-cors==4.0.0
+requests==2.31.0
+python-dotenv==1.0.0
+werkzeug==2.3.7
+huggingface-hub>=0.17.0
+gradio==4.8.0
+torch
+transformers
+accelerate
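Since python-dotenv is pinned here and app.py reads the token from os.environ, a minimal sketch of the local-development flow this suggests (the .env file, script name, and error message are assumptions, not part of the commit; on the Space itself the token would be configured as a repository secret):

# local_env_check.py - hypothetical helper, not part of this commit
import os
from dotenv import load_dotenv  # provided by the python-dotenv pin above

load_dotenv()  # loads a local .env file into os.environ; a no-op if none exists
token = os.environ.get("HUGGINGFACE_API_KEY")
if not token:
    raise RuntimeError("HUGGINGFACE_API_KEY is not set; add it as a Space secret or to a local .env file")
print("Token found, length:", len(token))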