Lhumpal committed on
Commit
b471855
·
verified ·
1 Parent(s): 2076526

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -93,7 +93,7 @@ async def chat(request: ChatRequest):
93
  config=GenerateContentConfig(
94
  system_instruction=["You are a helpful assistant who is an expert at summarization."],
95
  max_output_tokens=250,
96
- temperature=0.8
97
  ),
98
  )
99
  request.chat_history = request.chat_history[-(summary_thresh+2):] # keep last k items
@@ -134,11 +134,9 @@ async def chat(request: ChatRequest):
134
  model="gemini-2.0-flash",
135
  contents=request.chat_history,
136
  config=GenerateContentConfig(
137
- system_instruction=[request.system_message]
138
- generation_config={
139
- "temperature": 0.7,
140
- "max_output_tokens": 256, # Set your desired maximum output tokens here
141
- },
142
  ),
143
  )
144
 
 
93
  config=GenerateContentConfig(
94
  system_instruction=["You are a helpful assistant who is an expert at summarization."],
95
  max_output_tokens=250,
96
+ temperature=0.5
97
  ),
98
  )
99
  request.chat_history = request.chat_history[-(summary_thresh+2):] # keep last k items
 
134
  model="gemini-2.0-flash",
135
  contents=request.chat_history,
136
  config=GenerateContentConfig(
137
+ system_instruction=[request.system_message],
138
+ max_output_tokens=250,
139
+ temperature=0.8
 
 
140
  ),
141
  )
142