Lhumpal committed
Commit 91304f2 · verified · 1 Parent(s): bd7bff2

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -91,7 +91,7 @@ async def chat(request: ChatRequest):
         model="gemini-2.0-flash",
         contents=summarize_prompt,
         config=GenerateContentConfig(
-            system_instruction=["You are a helpful assistant who is an expert at summarization."]
+            system_instruction=["You are a helpful assistant who is an expert at summarization."],
             generation_config={
                 "temperature": 0.8,
                 "max_output_tokens": 256,  # Set your desired maximum output tokens here