Manofem committed on
Commit
5eda574
·
verified ·
1 Parent(s): c8cabbb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -1,18 +1,25 @@
1
  import time
2
  import gradio as gr
3
  from ctransformers import AutoModelForCausalLM
 
4
 
5
- # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
6
  llm = AutoModelForCausalLM.from_pretrained("TheBloke/WizardLM-7B-uncensored-GGUF", model_file="WizardLM-7B-uncensored.Q3_K_M.gguf", model_type="llama", stream=True)
7
  history = [""]
 
 
 
 
 
 
 
8
 
9
  def generate_response(message):
10
  global history
11
  for text in llm(message):
12
-
13
  response = ''.join(text)
14
  time.sleep(2)
15
- history.append(response)
 
16
  yield ' '.join(history)
17
  # Clear the history list after the last response
18
  history = ["Chatbot:"]
@@ -24,4 +31,4 @@ def chatbot(message, history):
24
  yield response
25
 
26
  iface = gr.ChatInterface(chatbot)
27
- iface.launch()
 
1
  import time
2
  import gradio as gr
3
  from ctransformers import AutoModelForCausalLM
4
+ from spellchecker import SpellChecker
5
 
 
6
# Load the quantized WizardLM-7B GGUF model through ctransformers.
# stream=True makes calls like llm(message) yield output incrementally
# (consumed token-by-token in generate_response below).
  llm = AutoModelForCausalLM.from_pretrained("TheBloke/WizardLM-7B-uncensored-GGUF", model_file="WizardLM-7B-uncensored.Q3_K_M.gguf", model_type="llama", stream=True)
7
# Shared mutable conversation history; appended to and reset by generate_response.
  history = [""]
8
# Module-level spell checker instance used by correct_words().
+ spell = SpellChecker()
9
+
10
def correct_words(text):
    """Return *text* with each whitespace-separated word spell-corrected.

    Each word is passed independently through the module-level ``spell``
    (a pyspellchecker ``SpellChecker``) and the corrected words are joined
    back with single spaces.

    Bug fix: since pyspellchecker 0.7, ``SpellChecker.correction`` returns
    ``None`` when it has no candidate for a word; the original code would
    then crash in ``' '.join`` with a TypeError. Fall back to the original
    word in that case.
    """
    corrected_words = [spell.correction(word) or word for word in text.split()]
    return ' '.join(corrected_words)
15
 
16
def generate_response(message):
    """Stream responses for *message* from the model.

    Consumes tokens from the streaming ``llm`` call, spell-corrects each
    piece, accumulates it in the shared module-level ``history`` list, and
    yields the space-joined history after every token.  Once the stream is
    exhausted, ``history`` is reset for the next exchange.
    """
    global history
    for token in llm(message):
        piece = ''.join(token)
        # Deliberate pacing between streamed updates.
        time.sleep(2)
        history.append(correct_words(piece))
        yield ' '.join(history)
    # Clear the history list after the last response
    history = ["Chatbot:"]
 
31
  yield response
32
 
33
# Wire the streaming chatbot function into Gradio's chat UI.
  iface = gr.ChatInterface(chatbot)
34
# Start the Gradio server (blocking call; module entry point).
+ iface.launch()