KraTUZen committed on
Commit
fde9314
·
1 Parent(s): d8c974e

essential changes

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -47,14 +47,14 @@ def generate_response(user_input, history):
47
  ])
48
  messages += f"\n<|user|>:{user_input}\n<|assistant|>:"
49
  model_inputs = tokenizer([messages], return_tensors="pt").to(device)
50
- streamer = TextIteratorStreamer(tokenizer, timeout=5.0, skip_prompt=True, skip_special_tokens=True)
51
  generate_kwargs = dict(
52
  **model_inputs,
53
  streamer=streamer,
54
- max_new_tokens=256,
55
  do_sample=True,
56
- top_p=0.9,
57
- top_k=12,
58
  temperature=0.7,
59
  num_beams=1,
60
  stopping_criteria=StoppingCriteriaList([stop])
@@ -71,8 +71,8 @@ def generate_response(user_input, history):
71
  # Define the system prompt for seeding the model's context
72
 
73
  SYSTEM_PROMPT = (
74
- "I am LogicLink, Version 5—a state-of-the-art AI chatbot created by "
75
- "Kratu Gautam (A-27) and Geetank Sahare (A-28) from SY CSE(AIML) GHRCEM. "
76
  "I am here to assist you with any queries. How can I help you today?"
77
  )
78
 
@@ -388,4 +388,4 @@ with gr.Blocks(css=css, fill_width=True, title="LogicLinkV5") as demo:
388
  queue=False
389
  )
390
 
391
- demo.queue().launch()
 
47
  ])
48
  messages += f"\n<|user|>:{user_input}\n<|assistant|>:"
49
  model_inputs = tokenizer([messages], return_tensors="pt").to(device)
50
+ streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
51
  generate_kwargs = dict(
52
  **model_inputs,
53
  streamer=streamer,
54
+ max_new_tokens=1024,
55
  do_sample=True,
56
+ top_p=0.95,
57
+ top_k=50,
58
  temperature=0.7,
59
  num_beams=1,
60
  stopping_criteria=StoppingCriteriaList([stop])
 
71
  # Define the system prompt for seeding the model's context
72
 
73
  SYSTEM_PROMPT = (
74
+ "I am LogicLink, Version 5, A state-of-the-art AI chatbot created and engineered by "
75
+ "Kratu Gautam"
76
  "I am here to assist you with any queries. How can I help you today?"
77
  )
78
 
 
388
  queue=False
389
  )
390
 
391
+ demo.queue().launch(share=True, debug=True)