tdurzynski committed
Commit ddcc450 · verified · 1 Parent(s): 6daaf67

Update app.py

Files changed (1)
  1. app.py +9 -16
app.py CHANGED
@@ -3,9 +3,9 @@ import logging
 import gradio as gr
 import asyncio
 from dotenv import load_dotenv
-from langchain.document_loaders import ArxivLoader
-from langchain.vectorstores import Chroma
-from langchain_community.embeddings import HuggingFaceHubEmbeddings
+from langchain_community.document_loaders import ArxivLoader  # Updated import
+from langchain_community.vectorstores import Chroma  # Updated import
+from langchain_huggingface import HuggingFaceEmbeddings  # Updated import
 from langchain_groq import ChatGroq
 from PyPDF2 import PdfReader
 from huggingface_hub import login
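Note: ArxivLoader and Chroma now live in the langchain-community package, and the embeddings class in the separate langchain-huggingface package, so both distributions have to be installed alongside langchain-groq. A quick illustrative check of the new import paths (assumed environment, not part of the commit):

# Assumes: pip install langchain-community langchain-huggingface chromadb sentence-transformers
from langchain_community.document_loaders import ArxivLoader
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings

# Print where each relocated class is defined.
for cls in (ArxivLoader, Chroma, HuggingFaceEmbeddings):
    print(f"{cls.__name__}: {cls.__module__}")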
@@ -24,14 +24,11 @@ if not HUGGING_API_KEY or not GROQ_API_KEY:
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-# Authenticate with Hugging Face
+# Authenticate with Hugging Face (for model downloads)
 login(HUGGING_API_KEY)
 
-# Load models and embeddings with a specific embedding model
-embedding_model = HuggingFaceHubEmbeddings(
-    huggingfacehub_api_token=HUGGING_API_KEY,
-    model_id="sentence-transformers/all-MiniLM-L6-v2"  # Explicitly set a proper embedding model
-)
+# Load models and embeddings with a local embedding model
+embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 llm = ChatGroq(temperature=0, model_name="llama3-70b-8192", api_key=GROQ_API_KEY)
 client = AsyncGroq(api_key=GROQ_API_KEY)
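Note: the removed HuggingFaceHubEmbeddings sent every embedding request to the Hugging Face Inference API with the account token, while the new HuggingFaceEmbeddings downloads sentence-transformers/all-MiniLM-L6-v2 once and embeds locally. A rough sketch of how the replacement plugs into the Chroma store; the chunks list below is a hypothetical stand-in for the PDF text split inside process_pdf, which this diff does not show:

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Local model: no Inference API calls after the initial download.
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
print(len(embedding_model.embed_query("test")))  # all-MiniLM-L6-v2 returns 384-dimensional vectors

# Hypothetical chunks standing in for text extracted from the uploaded PDF.
chunks = ["Section 1: introduction ...", "Section 2: methods ..."]
store = Chroma.from_texts(chunks, embedding_model)
print(store.similarity_search("methods", k=1)[0].page_content)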
@@ -39,7 +36,7 @@ client = AsyncGroq(api_key=GROQ_API_KEY)
 pdf_vector_store = None
 current_pdf_path = None
 
-# General Chat (unchanged)
+# General Chat
 async def chat_with_replit(message, history):
     try:
         messages = [{"role": "system", "content": "You are an assistant answering user questions."}]
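Note: the body of chat_with_replit is not shown in this diff; judging from the AsyncGroq client above, it presumably wraps an async chat-completion call. A hedged, standalone sketch of that call pattern (hypothetical message list, not the commit's code):

import asyncio
from groq import AsyncGroq

async def ask(client: AsyncGroq) -> str:
    # Same model the app configures for ChatGroq.
    response = await client.chat.completions.create(
        model="llama3-70b-8192",
        messages=[
            {"role": "system", "content": "You are an assistant answering user questions."},
            {"role": "user", "content": "Hello!"},
        ],
    )
    return response.choices[0].message.content

# Mirrors chat_with_replit_sync, which bridges the async call into Gradio's synchronous callback.
print(asyncio.run(ask(AsyncGroq(api_key="your-groq-key"))))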
@@ -59,7 +56,7 @@ async def chat_with_replit(message, history):
 def chat_with_replit_sync(message, history):
     return asyncio.run(chat_with_replit(message, history))
 
-# ArXiv Chat (unchanged)
+# ArXiv Chat
 async def chat_with_replit_arxiv(message, history, doi_num):
     try:
         loader = ArxivLoader(query=str(doi_num), load_max_docs=10)
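Note: ArxivLoader fetches paper text and metadata directly from arXiv for a given ID or query, and relies on the arxiv and pymupdf packages. A small illustrative example, independent of the app:

from langchain_community.document_loaders import ArxivLoader

# Load up to 10 matching papers; each becomes a Document with full text and metadata.
loader = ArxivLoader(query="1706.03762", load_max_docs=10)
docs = loader.load()
print(docs[0].metadata["Title"])
print(docs[0].page_content[:200])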
@@ -125,7 +122,6 @@ def process_pdf(pdf_file):
 
 # Gradio UI
 with gr.Blocks() as app:
-    # General Chat (unchanged)
     with gr.Tab(label="General Chat"):
         gr.Markdown("### Chat with the Assistant")
         with gr.Row():
@@ -151,7 +147,6 @@ with gr.Blocks() as app:
         general_send_button.click(update_general_response, inputs=general_chat_history,
                                   outputs=[general_chat_history, general_chat_output])
 
-    # ArXiv Chat (unchanged)
     with gr.Tab(label="Chat with ArXiv Paper"):
         gr.Markdown("### Ask Questions About an ArXiv Paper")
         with gr.Row():
@@ -178,7 +173,6 @@ with gr.Blocks() as app:
         arxiv_send_button.click(update_arxiv_response, inputs=[arxiv_chat_history, arxiv_doi],
                                 outputs=[arxiv_chat_history, arxiv_chat_output])
 
-    # Local PDF Chat
     with gr.Tab(label="Chat with Local PDF"):
         gr.Markdown("### Ask Questions About an Uploaded PDF")
         pdf_file_input = gr.File(label="Upload PDF file", file_types=[".pdf"])
@@ -207,5 +201,4 @@ with gr.Blocks() as app:
         pdf_send_button.click(update_pdf_response, inputs=pdf_chat_history,
                               outputs=[pdf_chat_history, pdf_chat_output])
 
-app.launch()
-
+app.launch()
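Note: the UI hunks above mostly drop redundant per-tab comments; the Gradio wiring itself is unchanged. For reference, a stripped-down sketch of the tab/button pattern the app uses (component names and the echo handler are illustrative, not taken from app.py):

import gradio as gr

def respond(message):
    # Placeholder for the app's Groq-backed chat handlers.
    return f"Echo: {message}"

with gr.Blocks() as demo:
    with gr.Tab(label="General Chat"):
        box = gr.Textbox(label="Your message")
        out = gr.Textbox(label="Assistant reply")
        send = gr.Button("Send")
        # The click event routes the textbox value through the handler into the output box.
        send.click(respond, inputs=box, outputs=out)

demo.launch()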
 