Update app.py
app.py CHANGED
@@ -32,8 +32,9 @@ Given the user's appliance usage information (device name, wattage, hours used p
 1. Flag the highest energy consumers.
 2. Recommend practical, empathetic, achievable actions.
 3. Suggest appliance swaps (e.g. LED, inverter AC) and habit changes.
-Give at most 5 suggestions and format with bullet points that is <=
-
+Give at most 5 suggestions and format with bullet points that is <= 100 tokens.
+Don't add anything to the response besides the recommendation
+Here is the user's input:
 """
 
 def generate_recommendation(appliance_info: str) -> str:
@@ -42,14 +43,21 @@ def generate_recommendation(appliance_info: str) -> str:
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
-            max_new_tokens=
+            max_new_tokens=100,
             use_cache=True,
             do_sample=False,
             temperature=0.0
         )
-
-
-
+    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    recommendation = text.split("Recommendations:")[-1].strip()
+
+
+    if "Note:" in recommendation:
+
+        recommendation = recommendation.split("Note:")[0].strip()
+
+    cleaned_recommendation = "\n".join(line.strip() for line in recommendation.splitlines() if line.strip())
+
     return cleaned_recommendation
 
 # Define the Gradio interface
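The diff does not show how `inputs` is built, but the new prompt line "Here is the user's input:" and the split on "Recommendations:" suggest the user's appliance details (plus a response header) are appended to the prompt before tokenization. A minimal sketch of that step, where SYSTEM_PROMPT, build_inputs, and the trailing "Recommendations:" marker are assumed names rather than code taken from app.py:

    def build_inputs(appliance_info: str):
        # Hypothetical helper: SYSTEM_PROMPT stands in for the triple-quoted prompt
        # above, and the "Recommendations:" header is an assumption that gives the
        # decode step in the second hunk a marker to split on.
        prompt = SYSTEM_PROMPT + "\n" + appliance_info + "\n\nRecommendations:"
        return tokenizer(prompt, return_tensors="pt").to(model.device)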
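Put together, the changed function reads roughly as follows. This is a sketch under the assumptions above, with `model` and `tokenizer` taken to be the globals loaded earlier in app.py (their setup is not part of this diff) and `build_inputs` being the hypothetical helper sketched before:

    import torch

    def generate_recommendation(appliance_info: str) -> str:
        inputs = build_inputs(appliance_info)  # hypothetical helper from the sketch above

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=100,   # hard cap matching the "<= 100 tokens" prompt instruction
                use_cache=True,
                do_sample=False,      # greedy decoding, so output is deterministic for a given input
                temperature=0.0
            )

        # Keep only the text generated after the last "Recommendations:" marker.
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        recommendation = text.split("Recommendations:")[-1].strip()

        # Drop any trailing "Note:" disclaimer the model appends.
        if "Note:" in recommendation:
            recommendation = recommendation.split("Note:")[0].strip()

        # Remove blank lines and stray indentation so the bullet list renders cleanly.
        cleaned_recommendation = "\n".join(
            line.strip() for line in recommendation.splitlines() if line.strip()
        )
        return cleaned_recommendation

With do_sample=False the temperature value is effectively unused, and max_new_tokens=100 enforces the length limit that the prompt otherwise only requests.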