Spaces:
Sleeping
Sleeping
File size: 1,869 Bytes
5592134 a845a1d 5592134 674da6b a845a1d 787f6e4 d657256 5592134 a845a1d 5592134 674da6b d80aa94 674da6b 2029bcf d80aa94 5592134 674da6b 5592134 d80aa94 5592134 674da6b 5592134 674da6b a845a1d 674da6b a845a1d 5592134 674da6b d80aa94 5592134 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
# Hugging Face Hub IDs: base LLaMA checkpoint plus a LoRA adapter
# fine-tuned for late-blight risk classification.
base_model_id = "openlm-research/open_llama_3b"
adapter_path = "jalonso24/lora-lateblight-v3"
# Load to CPU (safe on Spaces)
# float32 is used because half precision is poorly supported on CPU.
base_model = AutoModelForCausalLM.from_pretrained(
base_model_id,
torch_dtype=torch.float32
).to("cpu")
# Attach the LoRA adapter weights on top of the frozen base model (PEFT).
model = PeftModel.from_pretrained(base_model, adapter_path)
# use_fast=False selects the slow SentencePiece tokenizer;
# NOTE(review): presumably chosen to avoid a fast-tokenizer conversion issue for
# open_llama — confirm.
tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_fast=False)
# LLaMA tokenizers ship without a pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token
# New structured prompt logic
def predict_risk(prompt):
    """Classify late-blight risk for a structured growing scenario.

    Args:
        prompt: Structured Spanish description of the scenario (variety,
            rainfall, temperatures, humidity, planting month), one field
            per line.

    Returns:
        The first line of the model's answer after the "Respuesta:" marker,
        expected to be one of "Bajo", "Moderado" or "Alto".
    """
    # Append a fixed classification instruction so the model answers with
    # exactly one of the three labels.
    structured_prompt = (
        f"{prompt.strip()}\n\n"
        "Clasifica el riesgo de tizón tardío (elige una sola opción):\n"
        "- Bajo\n- Moderado\n- Alto\n\nRespuesta:"
    )
    inputs = tokenizer(structured_prompt, return_tensors="pt")
    inputs = {k: v.to("cpu") for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            # A short label is expected; 10 new tokens is plenty.
            max_new_tokens=10,
            # Greedy decoding for a deterministic label. The previous
            # `temperature=0.5` was removed: it is ignored (and warned
            # about by recent transformers releases) when do_sample=False.
            do_sample=False,
            pad_token_id=tokenizer.eos_token_id,
        )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the first line after the final answer marker.
    result = response.split("Respuesta:")[-1].strip().split("\n")[0]
    return result
# Interface
# Single worked example pre-filled in the Gradio UI (one inner list per
# example, one string per input component). Spanish accented characters and
# the degree sign were restored from mojibake (the file had been decoded
# with the wrong codec, e.g. "Precipitaci贸n" -> "Precipitación").
examples = [[
    "Variedad: INIA-302 Amarilis\n"
    "Precipitación: 18.4 mm\n"
    "Temperatura Máxima: 17.2°C\n"
    "Temperatura Mínima: 6.1°C\n"
    "Humedad Promedio: 84.12%\n"
    "Mes de siembra: noviembre"
]]
# Build and launch the Gradio UI: one multi-line textbox in, one textbox out.
# The title emoji and Spanish accents were restored from mojibake
# ("馃尡" -> "🌱", "Tiz贸n Tard铆o" -> "Tizón Tardío").
gr.Interface(
    fn=predict_risk,
    inputs=gr.Textbox(lines=8, label="Escenario estructurado"),
    outputs=gr.Textbox(label="Predicción del modelo"),
    title="🌱 Riesgo de Tizón Tardío (LLaMA + LoRA)",
    description="Predice si el riesgo es Bajo, Moderado o Alto usando datos estructurados.",
    examples=examples,
).launch()
|