File size: 2,524 Bytes
293845a
506c92c
 
89e764a
 
e14bcdf
89e764a
12662f5
89e764a
 
 
 
 
 
 
 
12662f5
89e764a
 
 
 
12662f5
 
89e764a
12662f5
209742c
 
89e764a
e68a422
 
ab69ad0
e68a422
 
a913fd7
ab69ad0
 
e68a422
506c92c
547b515
89e764a
 
be2c3d1
547b515
 
a913fd7
89e764a
e14bcdf
89e764a
547b515
a913fd7
 
 
 
 
 
 
 
8024ea6
89e764a
 
 
 
 
 
 
 
209742c
 
89e764a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import os
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import snapshot_download, hf_hub_download

# Download model files
def setup_model():
    """Download and load the Qwen2.5-0.5B-Instruct model in GGUF form.

    Fetches the full instruct-repo snapshot (for the tokenizer/config files),
    then downloads the quantized GGUF weights into the same directory so
    ``from_pretrained`` can resolve both from one path.

    Returns:
        tuple: ``(tokenizer, model)`` where ``model`` is wrapped in
        ``torch.compile`` to speed up repeated ``generate`` calls after
        the first warm-up run.
    """
    instruct_repo = "Qwen/Qwen2.5-0.5B-Instruct"
    # Snapshot provides tokenizer.json / config files alongside the weights.
    local_dir = snapshot_download(repo_id=instruct_repo)
    gguf_filename = "qwen2.5-0.5b-instruct-q5_k_m.gguf"
    # NOTE: the old `local_dir_use_symlinks=False` argument was dropped —
    # it is deprecated and ignored by current huggingface_hub; files given
    # a `local_dir` are materialized there by default.
    hf_hub_download(
        repo_id="Qwen/Qwen2.5-0.5B-Instruct-GGUF",
        filename=gguf_filename,
        local_dir=local_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(local_dir, trust_remote_code=True)
    # `gguf_file` tells transformers to dequantize the GGUF weights on load.
    model = AutoModelForCausalLM.from_pretrained(
        local_dir,
        gguf_file=gguf_filename,
        trust_remote_code=True
    )
    return tokenizer, torch.compile(model)

# Load tokenizer and model once at import time so every request reuses them.
tokenizer, model = setup_model()


# System prompt prepended to every request; generate_recommendation appends
# the user's appliance data after it. Runtime string — do not edit casually.
prompt_prefix = """
You are an energy‑saving expert tasked to help households reduce their monthly electricity bills.
Analyze the following appliance usage data, which is provided in the format "Appliance Name: Wattage, hours/day, days/week". 
1. Flag the highest energy consumers.
2. Recommend practical, empathetic, achievable actions.
3. Suggest appliance swaps (e.g. Incandescent to LED, inverter AC) and habit changes.
Provide at most 5 recommendation bullet points and stop there.
Input Data:
"""

def generate_recommendation(appliance_info: str) -> str:
    """Generate up to five numbered energy-saving recommendations.

    Args:
        appliance_info: Appliance usage data, one appliance per line in the
            format "Appliance Name: Wattage, hours/day, days/week".

    Returns:
        The model's output trimmed to at most 5 numbered lines; if the model
        produced no numbered lines, the raw recommendation text is returned
        instead of an empty string.
    """
    prompt = prompt_prefix + appliance_info + "\n\nRecommendations:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        # Greedy decoding. `temperature=0.0` was removed: it is ignored
        # (and triggers a transformers warning) when do_sample=False.
        outputs = model.generate(
            **inputs,
            max_new_tokens=120,
            use_cache=True,
            do_sample=False,
        )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The prompt itself ends with "Recommendations:", so split on the marker
    # and keep only what the model generated after the last occurrence.
    recommendation = text.split("Recommendations:")[-1].strip()
    # Keep at most the first 5 numbered lines ("1. ...", "2. ...", ...).
    lines = recommendation.splitlines()
    filtered_lines = [
        line for line in lines if line.strip() and line.strip()[0].isdigit()
    ][:5]
    # Robustness: fall back to the full text when no numbered lines appear,
    # rather than silently returning an empty answer.
    return "\n".join(filtered_lines) if filtered_lines else recommendation


# Gradio front-end: a single free-text input mapped straight to text output.
_usage_input = gr.Textbox(lines=10, placeholder="Enter appliance usage details...")

iface = gr.Interface(
    fn=generate_recommendation,
    inputs=_usage_input,
    outputs="text",
    title="Energy-Saving Recommendation Generator",
    description="Provide appliance usage details to receive energy-saving tips.",
)

# Launch the web UI only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()