Update app.py
app.py
CHANGED
@@ -93,7 +93,7 @@ async def chat(request: ChatRequest):
             config=GenerateContentConfig(
                 system_instruction=["You are a helpful assistant who is an expert at summarization."],
                 max_output_tokens=250,
-                temperature=0.
+                temperature=0.5
             ),
         )
         request.chat_history = request.chat_history[-(summary_thresh+2):] # keep last k items
@@ -134,11 +134,9 @@ async def chat(request: ChatRequest):
         model="gemini-2.0-flash",
         contents=request.chat_history,
         config=GenerateContentConfig(
-            system_instruction=[request.system_message]
-
-
-            "max_output_tokens": 256, # Set your desired maximum output tokens here
-        },
+            system_instruction=[request.system_message],
+            max_output_tokens=250,
+            temperature=0.8
         ),
     )
 
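For context, a minimal sketch (not part of the commit) of how the updated summarization call might look in full, assuming the google-genai SDK and an API key in the environment; the chat_history list here is a stand-in for the endpoint's request.chat_history:

from google import genai
from google.genai.types import GenerateContentConfig

# Assumes GOOGLE_API_KEY (or GEMINI_API_KEY) is set in the environment.
client = genai.Client()

# Stand-in for request.chat_history from the endpoint's ChatRequest.
chat_history = [
    "User: We discussed deployment options for the FastAPI service.",
    "Assistant: Right, we compared Cloud Run and a plain VM.",
]

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=chat_history,
    config=GenerateContentConfig(
        system_instruction=["You are a helpful assistant who is an expert at summarization."],
        max_output_tokens=250,
        temperature=0.5,  # value introduced by this commit
    ),
)
print(response.text)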