|
|
|
|
|
|
|
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
import torch |
|
import gradio as gr |
|
|
|
|
|
# Conversational model checkpoint; DialoGPT-large is a GPT-2-style causal LM
# fine-tuned on Reddit dialogue (weights downloaded from the HF Hub on first run).
model_name = "microsoft/DialoGPT-large"

# Tokenizer and model are loaded once at import time and shared by all requests.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
|
|
def generate_response(prompt: str) -> str:
    """Generate a single DialoGPT reply for *prompt*.

    Args:
        prompt: The user's message (plain text).

    Returns:
        The model's reply only — the echoed input prompt is stripped before
        decoding.
    """
    # DialoGPT expects each turn terminated by the EOS token.
    input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")

    # Inference only: no_grad avoids building an autograd graph.
    with torch.no_grad():
        chat_response = model.generate(
            input_ids,
            max_length=1000,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.8,
            no_repeat_ngram_size=2,
        )

    # generate() returns prompt + continuation; slice off the prompt tokens so
    # the user does not see their own message echoed back in the answer.
    reply_ids = chat_response[:, input_ids.shape[-1]:]
    response = tokenizer.decode(reply_ids[0], skip_special_tokens=True)

    return response
|
|
|
|
|
# Gradio 3.0+ removed the gr.inputs / gr.outputs namespaces; components are
# instantiated directly from the top-level gr module.
inputs = gr.Textbox(lines=7, label="Введите сообщение")

outputs = gr.Textbox(label="Ответ")

title = "Chat.UI на основе DialoGPT"

description = "Простой чат на основе модели DialoGPT из библиотеки transformers"

# Clickable example prompts shown below the interface.
examples = [
    ["Привет, как дела?"],
    ["Что ты думаешь о новом фильме?"]
]

# Wire the generation function to a simple text-in / text-out web UI.
chat_interface = gr.Interface(
    fn=generate_response,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    examples=examples
)

# Start the local web server (blocks until interrupted).
chat_interface.launch()