Update app.py
app.py CHANGED

```diff
@@ -1,26 +1,28 @@
-
 import gradio as gr
+import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from peft import PeftModel
-import torch
 
-# Load base model and LoRA adapter
 base_model_id = "openlm-research/open_llama_3b"
 adapter_path = "jalonso24/lora-lateblight-v3"
 
-
+# Load base model with device_map="auto" for CPU Spaces
+base_model = AutoModelForCausalLM.from_pretrained(
+    base_model_id,
+    torch_dtype=torch.float32,
+    device_map="auto"
+)
 model = PeftModel.from_pretrained(base_model, adapter_path)
-tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_fast=False)
 
-
+tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_fast=False)
+tokenizer.pad_token = tokenizer.eos_token
 
-# Inference function
 def predict_risk(prompt):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
-            max_new_tokens=
+            max_new_tokens=50,  # keeps output fast but complete
             temperature=0.7,
             do_sample=True,
             pad_token_id=tokenizer.eos_token_id
@@ -28,23 +30,20 @@ def predict_risk(prompt):
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return response
 
-
-
-
-
-
-]
+examples = [[
+    "La variedad es INIA-302 Amarilis, sembrada en noviembre.\n"
+    "La precipitación fue 18.4 mm, la temperatura máxima 17.2°C, "
+    "la mínima 6.1°C y la humedad promedio 84.12%.\n\n"
+    "¿Cuál es el riesgo de tizón tardío?"
+]]
 
-description = """
-### 🧪 Predict Late Blight Risk using `lora-lateblight-v3`
-Enter a weather and crop scenario in Spanish, and the model will respond with the predicted level of late blight risk.
-"""
+description = "🧪 Predice el riesgo de tizón tardío a partir de datos climáticos y de cultivo."
 
 gr.Interface(
     fn=predict_risk,
-    inputs=gr.Textbox(lines=6, label="
-    outputs=gr.Textbox(lines=4, label="
-    title="🌱
+    inputs=gr.Textbox(lines=6, label="Escenario en lenguaje natural (en español)"),
+    outputs=gr.Textbox(lines=4, label="Predicción del modelo"),
+    title="🌱 Predicción de Tizón Tardío con OpenLLaMA 3B",
     description=description,
     examples=examples
 ).launch()
```
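Since the Space runs on CPU, one optional follow-up (not part of this commit) is to fold the LoRA weights into the base model once at startup with PEFT's `merge_and_unload()`, so each `generate` call skips the adapter indirection. A minimal sketch, reusing the model IDs from `app.py`:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

base_model_id = "openlm-research/open_llama_3b"
adapter_path = "jalonso24/lora-lateblight-v3"

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.float32,  # float32: CPU Spaces get no fp16 speedup
    device_map="auto",
)
model = PeftModel.from_pretrained(base_model, adapter_path)

# Merge the LoRA deltas into the base weights and drop the PEFT wrapper;
# the result is a plain transformers model with no adapter overhead.
model = model.merge_and_unload()
model.eval()

tokenizer = AutoTokenizer.from_pretrained(base_model_id, use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
```

The merged model generates the same outputs as the wrapped one; the trade-off is that the adapter can no longer be swapped out afterwards.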