Wh1plashR committed on
Commit e68a422 · verified · 1 Parent(s): 8024ea6

clean recommendation

Files changed (1)
  1. app.py +13 -14
app.py CHANGED
@@ -26,15 +26,15 @@ def setup_model():
 tokenizer, model = setup_model()
 
 
-prompt_prefix = (
-    "You are an energy-saving advisor. "
-    "Given appliances (name, wattage, hours/day, days/week), identify top consumers and up to 5 actionable bullet-point recommendations (practical, empathetic), "
-    "including appliance swaps and habit changes. "
-    "For each, include estimated monthly kWh saved and cost reduction. "
-    "Do not include disclaimers, notes, or formatting instructions. "
-    "Keep response under 120 tokens, bullets only."
-    "\nSummary:\n"
-)
+prompt_prefix = """
+You are an energy-saving expert tasked to help households reduce their monthly electricity bills.
+Given the user's appliance usage information (device name, wattage, hours used per day, days used per week):
+1. Flag the highest energy consumers.
+2. Recommend practical, empathetic, achievable actions.
+3. Suggest appliance swaps (e.g. LED, inverter AC) and habit changes.
+Give at most 5 suggestions, formatted as bullet points, and keep the response under 120 tokens.
+Here is the summary:
+"""
 
 def generate_recommendation(appliance_info: str) -> str:
     prompt = prompt_prefix + appliance_info + "\n\nRecommendations:"
@@ -43,15 +43,14 @@ def generate_recommendation(appliance_info: str) -> str:
     outputs = model.generate(
         **inputs,
         max_new_tokens=120,
-        return_dict_in_generate=False,
+        use_cache=True,
         do_sample=False,
         temperature=0.0
     )
     text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    cleaned_text = "\n".join(
-        line for line in text.split("\n") if not line.lower().startswith(("note:", "format:"))
-    )
-    return cleaned_text.strip()
+    recommendation = text.split("Recommendations:")[-1].strip()
+    cleaned_recommendation = "\n".join(line.strip() for line in recommendation.splitlines() if line.strip())
+    return cleaned_recommendation
 
 # Define the Gradio interface
 iface = gr.Interface(
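
For reference, a model-free sketch of what the new post-processing does: decoder-only generation echoes the prompt, so the commit keeps only the text after the final "Recommendations:" marker and then drops blank lines. The raw_output sample below is invented for illustration and is not actual output from this Space's model.

# Invented sample of raw decoder output (prompt echo + generated bullets),
# used only to illustrate the cleanup added in this commit.
raw_output = """You are an energy-saving expert tasked to help households reduce their monthly electricity bills.
Here is the summary:
Refrigerator: 150 W, 24 h/day, 7 days/week

Recommendations:
- Raise the fridge temperature setting by 1-2 degrees C.

- Defrost the freezer regularly to keep the compressor efficient.
"""

# Same two-step cleanup as generate_recommendation: keep the text after the
# marker, then strip whitespace and drop empty lines.
recommendation = raw_output.split("Recommendations:")[-1].strip()
cleaned = "\n".join(line.strip() for line in recommendation.splitlines() if line.strip())
print(cleaned)
# - Raise the fridge temperature setting by 1-2 degrees C.
# - Defrost the freezer regularly to keep the compressor efficient.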