|
import streamlit as st |
|
from transformers import AutoModelForCausalLM, AutoTokenizer,pipeline |
|
import torch |
|
|
|
# Streamlit app: interactive text/code generation with a locally fine-tuned
# causal language model (loaded from MODEL_DIR via Hugging Face transformers).
st.title("Text Generator (Fine-Tuned Model)")

# Directory holding the fine-tuned model + tokenizer artifacts.
MODEL_DIR = "fine_tuned_model (1)"


@st.cache_resource  # load once per server process, not on every Streamlit rerun
def _load_generator(model_dir: str):
    """Load tokenizer and model from *model_dir* and wrap them in a
    text-generation pipeline.

    Without st.cache_resource, Streamlit re-executes the whole script on each
    widget interaction and would reload the model every time.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForCausalLM.from_pretrained(model_dir)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


code_generator = _load_generator(MODEL_DIR)

inputs_text = st.text_input("Please enter the text", value="def quicksort(arr):")

if st.button("submit"):
    # NOTE(review): max_length counts the prompt tokens too, so long prompts
    # shrink the generated portion — consider max_new_tokens instead.
    generated_code = code_generator(
        inputs_text,
        max_length=200,
        num_return_sequences=1,
    )
    # Pipeline returns a list of dicts; show the single requested sequence.
    st.write(generated_code[0]["generated_text"])
|
|