Wh1plashR committed on
Commit
cf42c9a
·
verified ·
1 Parent(s): 1245308

add prompt prefix

Browse files
Files changed (1) hide show
  1. app.py +21 -3
app.py CHANGED
@@ -3,11 +3,29 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
  # Load the pre-trained model and tokenizer
6
- model = AutoModelForCausalLM.from_pretrained("Wh1plashR/energy-saving-recommender-phi2-lora")
7
- tokenizer = AutoTokenizer.from_pretrained("Wh1plashR/energy-saving-recommender-phi2-lora")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
  def generate_recommendation(appliance_info):
10
- prompt = f"Input: {appliance_info}\nOutput:"
11
  inputs = tokenizer(prompt, return_tensors="pt")
12
  outputs = model.generate(**inputs, max_new_tokens=200)
13
  recommendation = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
3
  import torch
4
 
5
  # Load the pre-trained model and tokenizer
6
# Load the base Phi-2 model and its tokenizer once at import time so that
# generate_recommendation() can reuse them across calls.
model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
9
+
10
# System prompt prepended to every user query before generation.
# NOTE: plain triple-quoted string — the original used an f-string prefix
# even though the literal contains no placeholders.
promptPre = """You are an energy-saving expert tasked to help households reduce their monthly electricity bills.
Given the user's appliance usage information (including device name, wattage, hours used per day, and days used per week),
analyze and recommend specific, practical ways they can reduce their energy consumption.

Always prioritize suggestions like:
- Reducing usage time or frequency when excessive
- Replacing high-consumption appliances with more efficient alternatives (e.g., inverter air conditioners, LED lights)
- Changing habits (e.g., using fans instead of air conditioners when possible)
- Avoiding unnecessary standby power consumption
- Considering lifestyle changes (e.g., reading books instead of watching TV)

Format the output clearly with bullet points or short paragraphs.
Be empathetic, practical, and encouraging. Focus on achievable actions for the user.

Here is the user's input:
"""
26
 
27
def generate_recommendation(appliance_info):
    """Generate an energy-saving recommendation for the given appliance info.

    Prepends the system prompt (promptPre) so the model receives its
    instructions before the user's appliance data.
    """
    # Bug fix: the original wrote f"Input: promptPre + {appliance_info}..."
    # which embeds the literal text "promptPre" in the prompt instead of
    # concatenating the variable's value — the system prompt was never sent.
    prompt = promptPre + f"Input: {appliance_info}\nOutput:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=200)
    recommendation = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # NOTE(review): no `return` is visible in this chunk — the function
    # presumably continues below this excerpt; verify against the full file.