# AppTry / app.py — Hugging Face Space by Wh1plashR ("Optimised code", commit be2c3d1)
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Load the pre-trained model and tokenizer once at import time so the
# Gradio handler below reuses them across requests.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    # Passing load_in_8bit=True directly is deprecated in recent transformers;
    # the supported spelling is a BitsAndBytesConfig (same 8-bit quantization,
    # still requires the bitsandbytes package at runtime).
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",  # let accelerate place layers on the available device(s)
)
# NOTE(review): torch.compile over a bitsandbytes 8-bit model may fall back to
# eager mode for the quantized ops — kept from the original, but confirm it
# actually speeds up inference on the target hardware.
model = torch.compile(model)
# System prompt prepended to every user request (plain string: the original
# used an f-string with no placeholders, which is a lint error — F541 — and
# risks surprise formatting if braces are ever added to the text).
promptPre = """You are an energy-saving expert tasked to help households reduce their monthly electricity bills.
Given the user's appliance usage information (including device name, wattage, hours used per day, and days used per week),
analyze and recommend specific, practical ways they can reduce their energy consumption.
Always prioritize suggestions like:
- Reducing usage time or frequency when excessive
- Replacing high-consumption appliances with more efficient alternatives (e.g., inverter air conditioners, LED lights)
- Changing habits (e.g., using fans instead of air conditioners when possible)
- Avoiding unnecessary standby power consumption
- Considering lifestyle changes (e.g., reading books instead of watching TV)
Format the output clearly with bullet points or short paragraphs.
Be empathetic, practical, and encouraging. Focus on achievable actions for the user.
Here is the user's input:
"""
def generate_recommendation(summary):
    """Generate energy-saving tips for the user's appliance-usage summary.

    Args:
        summary: Free-text appliance usage details entered in the UI.

    Returns:
        The model's newly generated text (prompt not echoed, special
        tokens stripped).
    """
    # Bug fix: promptPre was defined at module level but never used, so the
    # model received the raw user text with no instructions at all.
    prompt = promptPre + summary
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():  # inference only — no gradients needed
        out = model.generate(**inputs, max_new_tokens=100, use_cache=True)
    # Decode only the tokens generated after the prompt; the original decoded
    # the whole sequence, echoing the entire (now much longer) prompt back.
    new_tokens = out[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
# ---- Gradio UI -------------------------------------------------------------
# One multi-line text box in, plain text out; generate_recommendation does the work.
usage_box = gr.Textbox(lines=10, placeholder="Enter appliance usage details...")

iface = gr.Interface(
    fn=generate_recommendation,
    inputs=usage_box,
    outputs="text",
    title="Energy-Saving Recommendation Generator",
    description="Provide appliance usage details to receive energy-saving tips.",
)

# Start the web server only when executed as a script (not when imported).
if __name__ == "__main__":
    iface.launch()