DHEIVER committed on
Commit 26a30cd · verified · 1 Parent(s): 263d6ed

Update app.py

Files changed (1):
  1. app.py +26 -12
app.py CHANGED
@@ -45,6 +45,8 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             temperature=temperature,
             max_new_tokens=max_tokens,
             top_k=top_k,
+            timeout=120,  # Increased to 120 seconds
+            max_retries=3  # Retries up to 3 times
         )
     else:
         llm = HuggingFaceEndpoint(
@@ -53,6 +55,8 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             temperature=temperature,
             max_new_tokens=max_tokens,
             top_k=top_k,
+            timeout=120,
+            max_retries=3
         )
     memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
     retriever = vector_db.as_retriever()
@@ -88,19 +92,29 @@ def conversation(qa_chain, message, history, language):
     else:
         prompt = f"Answer in English: {message}"
 
-    # Send the adjusted prompt to qa_chain
-    response = qa_chain.invoke({"question": prompt, "chat_history": formatted_chat_history})
-    response_answer = response["answer"]
-    if response_answer.find("Helpful Answer:") != -1:
-        response_answer = response_answer.split("Helpful Answer:")[-1]
-
-    response_sources = response["source_documents"]
-    response_source1 = response_sources[0].page_content.strip()
-    response_source2 = response_sources[1].page_content.strip()
-    response_source3 = response_sources[2].page_content.strip()
-    response_source1_page = response_sources[0].metadata["page"] + 1
-    response_source2_page = response_sources[1].metadata["page"] + 1
-    response_source3_page = response_sources[2].metadata["page"] + 1
+    try:
+        response = qa_chain.invoke({"question": prompt, "chat_history": formatted_chat_history})
+        response_answer = response["answer"]
+        if response_answer.find("Helpful Answer:") != -1:
+            response_answer = response_answer.split("Helpful Answer:")[-1]
+    except Exception as e:
+        if language == "Português":
+            response_answer = f"Erro: Não foi possível obter resposta do modelo devido a problemas no servidor. Tente novamente mais tarde. ({str(e)})"
+        else:
+            response_answer = f"Error: Could not get a response from the model due to server issues. Please try again later. ({str(e)})"
+
+    try:
+        response_sources = response["source_documents"]
+        response_source1 = response_sources[0].page_content.strip()
+        response_source1_page = response_sources[0].metadata["page"] + 1
+        response_source2 = response_sources[1].page_content.strip()
+        response_source2_page = response_sources[1].metadata["page"] + 1
+        response_source3 = response_sources[2].page_content.strip()
+        response_source3_page = response_sources[2].metadata["page"] + 1
+    except:
+        response_source1 = response_source2 = response_source3 = "N/A"
+        response_source1_page = response_source2_page = response_source3_page = 0
 
     new_history = history + [(message, response_answer)]
     return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
 
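In conversation, the bare qa_chain.invoke call is now guarded: on any exception the user gets a language-appropriate error string instead of a stack trace in the Gradio UI. The same flow, condensed into a runnable sketch; the helper name, the stub chain, and the explicit None return are illustrative, not part of the commit.

class FailingChain:
    # Stand-in for qa_chain that simulates a Hugging Face server timeout.
    def invoke(self, inputs):
        raise TimeoutError("endpoint timed out")

def answer_or_error(qa_chain, prompt, chat_history):
    try:
        response = qa_chain.invoke({"question": prompt, "chat_history": chat_history})
        answer = response["answer"]
        if "Helpful Answer:" in answer:
            answer = answer.split("Helpful Answer:")[-1]
        return response, answer
    except Exception as e:
        # The commit leaves `response` unbound on failure and localizes this
        # message to Portuguese when language == "Português"; returning None
        # explicitly here is a variant, not the committed code.
        return None, f"Error: Could not get a response from the model due to server issues. Please try again later. ({e})"

response, answer = answer_or_error(FailingChain(), "Answer in English: hi", [])
print(answer)  # Error: ... (endpoint timed out)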
 
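The second try/except matters because of the first: if invoke raised, `response` was never assigned, so `response["source_documents"]` itself raises, the bare except: absorbs it, and the handler still returns a complete output tuple with "N/A"/0 placeholders. A sketch of that fallback as a standalone helper; the function name is illustrative, since the commit inlines this logic in conversation().

def extract_sources(response, n=3):
    # Return (texts, pages) for the first n source documents, with
    # placeholders when the model call failed or sources are missing.
    try:
        docs = response["source_documents"]
        texts = [docs[i].page_content.strip() for i in range(n)]
        pages = [docs[i].metadata["page"] + 1 for i in range(n)]  # 1-based pages
    except Exception:
        # Keeps the Gradio return tuple well-formed even when `response`
        # is unavailable or holds fewer than n sources.
        texts, pages = ["N/A"] * n, [0] * n
    return texts, pages

The sketch uses except Exception rather than the commit's bare except:, which would also swallow KeyboardInterrupt and SystemExit.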