"""Gradio demo that serves text completions from a Hugging Face causal LM."""

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub identifier of the model to serve.
model_name = "segolilylabs/Lily-Cybersecurity-7B-v0.2"

# Load the tokenizer and model weights once at import time so every request
# reuses them (first run downloads the checkpoint from the Hub).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
|
def generate_text(input_text):
    """Generate a model continuation for ``input_text``.

    Parameters
    ----------
    input_text : str
        The prompt to complete.

    Returns
    -------
    str
        The decoded model output (includes the prompt text, since the
        full output sequence is decoded).
    """
    inputs = tokenizer(input_text, return_tensors="pt")
    # Bound only the *generated* continuation. The previous
    # ``max_length=100`` counted prompt tokens as well, so prompts near
    # 100 tokens produced truncated or empty completions.
    outputs = model.generate(**inputs, max_new_tokens=100)
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return output_text
|
# Wire the generator into a minimal text-in / text-out Gradio UI.
demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")

# Guard the launch so importing this module (e.g. for testing or reuse)
# does not start a web server; running it as a script still does.
if __name__ == "__main__":
    demo.launch()