import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Load model and tokenizer
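# Note: the meta-llama checkpoints are gated on the Hugging Face Hub; you must
# accept the license and authenticate (e.g. set the HF_TOKEN secret in a Space)
# before from_pretrained can download the weights.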
model_name = "meta-llama/Llama-3.2-3B"  # Hub ID of the base (non-instruct) 3B checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Define inference function
def generate_text(prompt, max_length=100, temperature=0.7):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(
        **inputs,  # pass input_ids and attention_mask together
        max_new_tokens=int(max_length),  # slider values may arrive as floats
        do_sample=True,
        temperature=temperature,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Create Gradio interface
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=5, placeholder="Enter your prompt here...", label="Prompt"),
        gr.Slider(minimum=1, maximum=500, value=100, step=1, label="Max New Tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, label="Temperature"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Llama 3.2 3B API",
    description="Generate text using Meta's Llama 3.2 3B model",
)
# Enable request queuing and launch the app; the Gradio client/REST API is exposed automatically
demo.queue()
demo.launch()
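# Example client call (a minimal sketch; "username/llama-3.2-3b-demo" is a
# placeholder for wherever this Space is hosted):
#
#     from gradio_client import Client
#
#     client = Client("username/llama-3.2-3b-demo")
#     result = client.predict(
#         "Once upon a time",  # prompt
#         200,                 # max new tokens
#         0.7,                 # temperature
#         api_name="/predict",
#     )
#     print(result)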