Update app.py
app.py
CHANGED
@@ -179,7 +179,7 @@ def analyze_space(url: str, progress=gr.Progress()):
 
 
 
-async def respond_stream(message: str, chat_history: List[Dict[str, str]], max_tokens: int, temperature: float, top_p: float):
+def respond_stream(message: str, chat_history: List[Dict[str, str]], max_tokens: int, temperature: float, top_p: float):
     system_message = """You are an AI coding expert specialized in Hugging Face. Please answer users' questions kindly and in detail.
 Recognize Gradio's characteristics accurately and resolve coding and errors without any omissions in Requirements.txt.
 Always strive to provide accurate and useful information."""
@@ -189,20 +189,19 @@ async def respond_stream(message: str, chat_history: List[Dict[str, str]], max_t
     messages.append({"role": "user", "content": message})
 
     try:
-
+        response = hf_client.chat_completion(
             messages,
             max_tokens=max_tokens,
             temperature=temperature,
-            top_p=top_p,
-            stream=True
+            top_p=top_p
         )
 
-        full_response =
-
-
-
-
-        yield
+        full_response = response.choices[0].message.content
+        words = full_response.split()
+        for i in range(len(words)):
+            partial_response = " ".join(words[:i+1])
+            time.sleep(0.05)  # adjust response speed
+            yield partial_response
     except Exception as e:
         yield f"Error occurred while generating the response: {str(e)}"
 
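Note that with stream=True removed, chat_completion now blocks until the entire completion is ready, and the word-by-word loop with time.sleep(0.05) only simulates streaming on the client side. If first-token latency matters, huggingface_hub's InferenceClient can stream server-side tokens directly; a minimal sketch under the assumption that hf_client is an InferenceClient (the model name is a placeholder, not taken from this commit):

from huggingface_hub import InferenceClient

hf_client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")  # placeholder model

def respond_stream_native(messages, max_tokens, temperature, top_p):
    partial = ""
    # stream=True yields ChatCompletionStreamOutput chunks as tokens arrive
    for chunk in hf_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        delta = chunk.choices[0].delta.content
        if delta:
            partial += delta
            yield partial  # the UI re-renders the growing reply on each yield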
@@ -319,7 +318,7 @@ def create_ui():
         msg = gr.Textbox(label="Message")
 
         max_tokens = gr.Slider(minimum=1, maximum=8000, value=4000, label="Max Tokens", visible=False)
-        temperature = gr.Slider(minimum=0, maximum=1, value=0.
+        temperature = gr.Slider(minimum=0, maximum=1, value=0.7, label="Temperature", visible=False)
         top_p = gr.Slider(minimum=0, maximum=1, value=0.9, label="Top P", visible=False)
 
         examples = [
@@ -330,19 +329,21 @@ def create_ui():
             ["Including the technical and business-model aspects to be used in the patent filing, write at least 4000 tokens centered on innovative, creative invention content organized to fit a patent application."],
             ["Continue the answer from where it left off."],
         ]
-
+
         gr.Examples(examples, inputs=msg)
 
         def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
-            generator = respond_stream(message, chat_history, max_tokens, temperature, top_p)
-            bot_message = asyncio.run(generator.__anext__())
             chat_history.append({"role": "user", "content": message})
-
-
+            bot_message = ""
+            for partial_response in respond_stream(message, chat_history, max_tokens, temperature, top_p):
+                bot_message = partial_response
+                chat_history[-1] = {"role": "assistant", "content": bot_message}
+                yield "", chat_history
+            return
 
         msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])
 
-
+
 
 
 
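One caveat in the new respond_wrapper: the loop rewrites chat_history[-1], but at that point the last entry is the user turn that was just appended, so the user's message gets overwritten by the assistant's reply. A corrected sketch that reserves a separate assistant turn first (assuming the chatbot uses messages-style history, i.e. dicts with role/content keys):

def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
    chat_history.append({"role": "user", "content": message})
    chat_history.append({"role": "assistant", "content": ""})  # placeholder turn to stream into
    for partial_response in respond_stream(message, chat_history, max_tokens, temperature, top_p):
        # Only the placeholder is updated; the user turn above stays intact
        chat_history[-1] = {"role": "assistant", "content": partial_response}
        yield "", chat_history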