bobpopboom committed (verified)
Commit 14ddf0d · Parent(s): e929713

Update app.py

Files changed (1):
  1. app.py +21 -21
app.py CHANGED
@@ -18,27 +18,27 @@ def respond(
     temperature,
     top_p,
 ):
-    # 2. Construct the Prompt (Crucial!)
-    prompt = f"{system_message}\n"
-    for user_msg, bot_msg in history:
-        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
-    prompt += f"User: {message}\nAssistant:"
-
-    # 3. Generate with the Pipeline
-    try:
-        response = pipe(
-            prompt,
-            max_new_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-        )[0]["generated_text"]
-        #Extract the bot's reply (adjust if your model format is different)
-        bot_response = response.split("Assistant:")[-1].strip()
-        yield bot_response
-
-    except Exception as e:
-        print(f"Error during generation: {e}")
-        yield "An error occurred during generation." #Handle generation errors.
+    # 2. Construct the Prompt (Crucial!)
+    prompt = f"{system_message}\n"
+    for user_msg, bot_msg in history:
+        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
+    prompt += f"User: {message}\nAssistant:"
+
+    # 3. Generate with the Pipeline
+    try:
+        response = pipe(
+            prompt,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+        )[0]["generated_text"]
+        #Extract the bot's reply (adjust if your model format is different)
+        bot_response = response.split("Assistant:")[-1].strip()
+        yield bot_response
+
+    except Exception as e:
+        print(f"Error during generation: {e}")
+        yield "An error occurred during generation." #Handle generation errors.
 
 # 4. Gradio Interface (No changes needed here)
 demo = gr.ChatInterface(
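
For reference, below is a minimal sketch of how a respond() generator like the one in this diff is typically wired to a transformers pipeline and gr.ChatInterface. The model name, the pipe initialization, and the additional_inputs controls are assumptions for illustration, not the actual contents of this Space's app.py, and the history loop assumes Gradio's older tuple-style chat history.

# Hypothetical minimal app.py wiring (assumed model name and UI controls).
import gradio as gr
from transformers import pipeline

# 1. Load a text-generation pipeline (placeholder model; the Space's real model may differ).
pipe = pipeline("text-generation", model="gpt2")

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # 2. Construct the prompt from the system message and tuple-style history.
    prompt = f"{system_message}\n"
    for user_msg, bot_msg in history:
        prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"

    # 3. Generate with the pipeline and return only the new assistant turn.
    try:
        response = pipe(
            prompt,
            max_new_tokens=max_tokens,
            do_sample=True,  # enable sampling so temperature/top_p take effect
            temperature=temperature,
            top_p=top_p,
        )[0]["generated_text"]
        yield response.split("Assistant:")[-1].strip()
    except Exception as e:
        print(f"Error during generation: {e}")
        yield "An error occurred during generation."

# 4. Gradio interface: the extra controls feed respond()'s additional parameters.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()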