Lhumpal committed
Commit 6a6f1b3 · verified · 1 parent: c802485

Update app.py

Files changed (1): app.py (+14 -6)
app.py CHANGED
@@ -37,12 +37,20 @@ async def chat(request: ChatRequest):
     try:
         if request.model_choice == "google":
             client = genai.Client(api_key=google_api_key)
-
-            # messages = [
-            #     {"role": "user", "parts": [{"text": request.message}]},
-            #     # {"role": "model", "parts": [{"text": "Great! Dogs are fun pets."}]},
-            #     # {"role": "user", "parts": [{"text": "How many dogs do I have?"}]},
-            # ]
+
+            if len(request.chat_history) > 10:
+                summarize_prompt = f"""Please summarize the following chat history concisely, focusing on the key points and main topics discussed. Avoid
+                unnecessary details and provide a clear, straightforward summary. {request.chat_history[:-10]}"""  # summarize everything except last k items
+                summary_response = client.models.generate_content(
+                    model="gemini-2.0-flash",
+                    contents=summarize_prompt,
+                    config=GenerateContentConfig(
+                        system_instruction=["You are a helpful assistant who is an expert at summarization."]
+                    ),
+                )
+                request.chat_history = request.chat_history[-10:]  # keep last k items
+                request.chat_history.insert(0, {"role": "user", "parts": [{"text": f"Here is a summary of this conversation so far: {summary_response.text}"}]})
+
 
             response = client.models.generate_content(
                 model="gemini-2.0-flash",