|
import time |
|
import gradio as gr |
|
from ctransformers import AutoModelForCausalLM |
|
from spellchecker import SpellChecker |
|
|
|
# Load the quantized WizardLM model; stream=True makes llm(prompt) yield
# tokens one at a time instead of returning the full completion.
llm = AutoModelForCausalLM.from_pretrained("TheBloke/WizardLM-7B-uncensored-GGUF", model_file="WizardLM-7B-uncensored.Q3_K_M.gguf", model_type="llama", stream=True)

# Shared conversation transcript.
# NOTE(review): this value is overwritten by the later `history = ["Chatbot:"]`
# assignment below — one of the two initializations is redundant.
history = [""]

# Word-frequency based spell checker used to post-process model output.
spell = SpellChecker()
|
|
|
def correct_words(text):
    """Return *text* with each whitespace-separated word spell-corrected.

    Args:
        text: Arbitrary string; split on whitespace, so punctuation stays
            attached to its word.

    Returns:
        The corrected words re-joined with single spaces.
    """
    words = text.split()
    # spell.correction() returns None when it has no candidate for a word
    # (pyspellchecker >= 0.7); fall back to the original word so the join
    # below never receives None and raises TypeError.
    corrected_words = [
        correction if (correction := spell.correction(word)) is not None else word
        for word in words
    ]
    corrected_text = ' '.join(corrected_words)
    return corrected_text
|
|
|
def generate_response(message):
    """Stream a spell-corrected LLM response for *message*.

    Generator: after each streamed token it yields the running transcript
    (the shared ``history`` entries plus the spell-corrected partial
    response) so the UI can update incrementally.  When generation
    finishes, the corrected response is appended to ``history`` once.

    Fixes over the previous revision: tokens are accumulated into one
    response instead of being corrected and appended to ``history``
    individually (which grew history by one fragment per token), the
    no-op ``''.join(token)`` was dropped, and the artificial 2-second
    sleep per token was removed.
    """
    global history
    response = ""
    for token in llm(message):
        response += token
        # Correct the accumulated text so far and show it alongside the
        # existing transcript without mutating history mid-generation.
        yield ' '.join(history + [correct_words(response)])
    # Record only the finished, corrected response — one entry per call.
    history.append(correct_words(response))
|
|
|
# Seed the transcript with a speaker label.
# NOTE(review): this clobbers the earlier `history = [""]` assignment made
# right after model load — the two initializations should be consolidated.
history = ["Chatbot:"]
|
|
|
def chatbot(message, history):
    """Gradio ChatInterface callback: stream replies for *message*.

    The *history* argument is supplied by Gradio's chat interface but is
    unused here; the module-level ``history`` list tracks the transcript
    instead.  Yields each chunk produced by ``generate_response`` with a
    short pause between UI updates.
    """
    for chunk in generate_response(message):
        time.sleep(0.1)  # throttle UI refresh rate
        yield chunk
|
|
|
# Build the chat UI around the streaming callback and start the local
# Gradio web server (blocks until the server is stopped).
iface = gr.ChatInterface(chatbot)

iface.launch()