Update app.py
app.py CHANGED
@@ -7,9 +7,9 @@ model_name = "tanusrich/Mental_Health_Chatbot"
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     torch_dtype=torch.float16,  # Reduce memory usage
-    device_map="…
+    device_map="cpu",  # Automatically assigns to GPU if available
     low_cpu_mem_usage=True,
-    max_memory={0: "…
+    max_memory={0: "3.5GiB", "cpu": "12GiB"},  # Optimize CPU memory
     offload_folder=None
 )
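For context, the loading step after this commit reads roughly as below. This is a minimal sketch, not the full app.py: the import lines and the tokenizer call are assumptions, since they fall outside the diff. Note that with device_map="cpu" every module is pinned to the CPU, so the inherited comment about automatic GPU assignment no longer applies, and the GPU entry in max_memory ({0: "3.5GiB"}) should have no effect on a CPU-only placement.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed imports and tokenizer line; only the from_pretrained kwargs
# below are taken from this commit.
model_name = "tanusrich/Mental_Health_Chatbot"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half-precision weights, roughly halves RAM
    device_map="cpu",           # pin every module to the CPU
    low_cpu_mem_usage=True,     # load weights incrementally, avoiding a second full copy
)

One caveat on the design choice: float16 saves memory, but many CPU kernels are slower (or unsupported) in half precision, so torch.float32, or torch.bfloat16 on recent CPUs, is a common alternative if CPU generation misbehaves.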
@@ -23,7 +23,7 @@ model.save_pretrained(model_save_path)
 tokenizer.save_pretrained(model_save_path)'''
 
 def generate_response(user_input):
-    inputs = tokenizer(user_input, return_tensors="pt").to(…
+    inputs = tokenizer(user_input, return_tensors="pt").to("cpu")
     with torch.no_grad():
         output = model.generate(
             **inputs,
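Filled out into runnable form, the generation helper might look like the sketch below, assuming the model and tokenizer from the loading step. The diff truncates after **inputs, so the generate arguments and the decode step here (max_new_tokens, pad_token_id) are illustrative assumptions, not values from the commit.

def generate_response(user_input):
    # Tokenize and keep the tensors on the CPU, matching device_map="cpu"
    inputs = tokenizer(user_input, return_tensors="pt").to("cpu")
    with torch.no_grad():  # inference only; skip gradient bookkeeping
        output = model.generate(
            **inputs,
            max_new_tokens=200,                   # assumed reply-length cap
            pad_token_id=tokenizer.eos_token_id,  # assumed; silences the missing-pad warning
        )
    # Decode only the newly generated tokens, dropping the echoed prompt
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Calling .to("cpu") on the tokenizer output keeps the input tensors on the same device as the weights, which is exactly what this edit aligns with the device_map="cpu" change above.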