Update app.py
app.py CHANGED
@@ -2,8 +2,8 @@ from langchain_huggingface import HuggingFaceEmbeddings
 import gradio as gr
 import os
 from googletrans import Translator
-import requests
-from dotenv import load_dotenv
+# import requests
+# from dotenv import load_dotenv
 # import numpy as np
 from langchain_community.vectorstores import Chroma
 from langchain_community.document_loaders import UnstructuredPDFLoader, PyPDFLoader
@@ -18,7 +18,7 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.llms.base import LLM
 from typing import List, Dict, Any, Optional
 from pydantic import BaseModel
-from tqdm import tqdm
+# from tqdm import tqdm
 import torch
 import logging
 
@@ -78,17 +78,11 @@ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, pr
     print(f"Using default LLM {default_llm} for {language}")
     llm_name = default_llm
 
-    memory = ConversationBufferMemory(
-        memory_key="chat_history",
-        output_key='answer',
-        return_messages=True
-    )
-
     qa_chain = ConversationalRetrievalChain.from_llm(
         llm=llm_name,
         retriever=vector_db.as_retriever(),
         chain_type="stuff",
-        memory=memory,
+        # memory=memory,
         return_source_documents=True,
         temperature=llm_temperature,
         verbose=False,