import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


# Load the instruction-tuned model and its tokenizer once at startup.
# Note: without torch_dtype / device_map arguments this loads full-precision
# weights on the CPU, which is slow for a 7B model.
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# torch.compile (PyTorch 2.x) wraps the model; attribute access such as
# model.device and model.generate is forwarded to the underlying module.
model = torch.compile(model)

# Instructions prepended to every user request before generation.
prompt_prefix = """
You are an energy-saving expert tasked with helping households reduce their monthly electricity bills.
Given the user's appliance usage information (device name, wattage, hours used per day, days used per week):
1. Flag the highest energy consumers.
2. Recommend practical, empathetic, achievable actions.
3. Suggest appliance swaps (e.g. LED bulbs, inverter AC) and habit changes.
Format your answer with bullet points.
Here is the input of the user:
"""
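
# Illustrative example of the input format the prompt above expects (made-up
# values, one appliance per line):
#
#   Air conditioner, 1200 W, 8 hours/day, 7 days/week
#   Refrigerator, 150 W, 24 hours/day, 7 days/week
#   Washing machine, 500 W, 1 hour/day, 3 days/week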


def generate_recommendation(appliance_info: str) -> str:
    """Generate energy-saving recommendations for the given appliance usage."""
    prompt = prompt_prefix + appliance_info + "\n\nRecommendations:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Greedy decoding (the generate() default), capped at 120 new tokens.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=120,
            use_cache=True,
        )

    # The decoded output contains the prompt as well, so keep only the text
    # after the trailing "Recommendations:" marker.
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text.split("Recommendations:")[-1].strip()
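
# Quick local check (illustrative, not part of the original app); uncomment to
# try the function without launching the web UI:
# print(generate_recommendation("Air conditioner, 1200 W, 8 hours/day, 7 days/week"))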


iface = gr.Interface(
    fn=generate_recommendation,
    inputs=gr.Textbox(lines=10, placeholder="Enter appliance usage details..."),
    outputs="text",
    title="Energy-Saving Recommendation Generator",
    description="Provide appliance usage details to receive energy-saving tips.",
)


if __name__ == "__main__":
    iface.launch()