import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# ── Choose the Mistral-7B-Instruct checkpoint ──────────────────────────────────
model_name = "mistralai/Mistral-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model = torch.compile(model)
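# Note (assumption, not part of the original script): torch.compile requires
# PyTorch 2.0+, and a 7B model in full fp32 needs roughly 28 GB of memory.
# On a CUDA GPU, a common alternative is half precision with automatic device
# placement (requires the `accelerate` package), e.g.:
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )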
prompt_prefix = """
You are an energy-saving expert tasked with helping households reduce their monthly electricity bills.
Given the user's appliance usage information (device name, wattage, hours used per day, days used per week):
1. Flag the highest energy consumers.
2. Recommend practical, empathetic, achievable actions.
3. Suggest appliance swaps (e.g. LED, inverter AC) and habit changes.
Format with bullet points.
Here is the user's input:
"""
def generate_recommendation(appliance_info: str) -> str:
    # Build the full prompt
    prompt = prompt_prefix + appliance_info + "\n\nRecommendations:"
    # Tokenize and move inputs to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate without gradients, capped at 120 new tokens
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=120,
            use_cache=True,
        )
    # Decode and return only the recommendations section
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text.split("Recommendations:")[-1].strip()
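# Quick smoke test with hypothetical input (illustrative values, not from the
# original script):
#   print(generate_recommendation(
#       "Air conditioner, 1500 W, 8 hours/day, 7 days/week\n"
#       "Refrigerator, 150 W, 24 hours/day, 7 days/week"
#   ))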
# Define the Gradio interface
iface = gr.Interface(
    fn=generate_recommendation,
    inputs=gr.Textbox(lines=10, placeholder="Enter appliance usage details..."),
    outputs="text",
    title="Energy-Saving Recommendation Generator",
    description="Provide appliance usage details to receive energy-saving tips.",
)
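# launch() serves the app locally; passing share=True (a standard Gradio
# option) would additionally create a temporary public URL.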
if __name__ == "__main__":
    iface.launch()