bobpopboom committed on
Commit
ff2cb04
·
verified ·
1 Parent(s): c6784b6

cleaned up unused options and config error i hope

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -38,21 +38,21 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
38
  prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
39
  prompt += f"User: {message}\nAssistant:"
40
 
41
- try:
42
  response = pipe(
43
  prompt,
44
  max_new_tokens=max_tokens,
45
- temperature=temperature,
46
- top_p=top_p,
47
  do_sample=False,
48
  pad_token_id=tokenizer.eos_token_id
49
  )[0]["generated_text"]
50
-
51
  bot_response = response.split("Assistant:")[-1].strip()
52
- yield bot_response
 
 
53
  except Exception as e:
54
  print(f"Error during generation: {e}")
55
- yield "An error occurred during generation."
56
 
57
  demo = gr.ChatInterface(
58
  respond,
@@ -62,10 +62,6 @@ demo = gr.ChatInterface(
62
  label="System message",
63
  ),
64
  gr.Slider(minimum=1, maximum=128, value=128, step=1, label="Max new tokens"),
65
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
66
- gr.Slider(
67
- minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)",
68
- ),
69
  ],
70
  chatbot=gr.Chatbot(type="messages"), # Updated to new format
71
  )
 
38
  prompt += f"User: {user_msg}\nAssistant: {bot_msg}\n"
39
  prompt += f"User: {message}\nAssistant:"
40
 
41
+ try:
42
  response = pipe(
43
  prompt,
44
  max_new_tokens=max_tokens,
 
 
45
  do_sample=False,
46
  pad_token_id=tokenizer.eos_token_id
47
  )[0]["generated_text"]
48
+
49
  bot_response = response.split("Assistant:")[-1].strip()
50
+
51
+ yield [message, bot_response] # Yield a list: [user_message, bot_response]
52
+
53
  except Exception as e:
54
  print(f"Error during generation: {e}")
55
+ yield [message, "An error occurred during generation."]
56
 
57
  demo = gr.ChatInterface(
58
  respond,
 
62
  label="System message",
63
  ),
64
  gr.Slider(minimum=1, maximum=128, value=128, step=1, label="Max new tokens"),
 
 
 
 
65
  ],
66
  chatbot=gr.Chatbot(type="messages"), # Updated to new format
67
  )