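"""Chainlit RAG demo: upload a text or PDF file, split it into chunks,
index the chunks in an in-memory vector database, and answer questions
about the file with an OpenAI chat model."""
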
import os
from typing import List

import chainlit as cl
from chainlit.types import AskFileResponse

from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader, PDFLoader
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
    AssistantRolePrompt,
)
from aimakerspace.openai_utils.embedding import EmbeddingModel
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.chatmodel import ChatOpenAI

system_template = """\
Use the following context to answer the user's question. If you cannot find the answer in the context, say you don't know the answer."""
system_role_prompt = SystemRolePrompt(system_template)

user_prompt_template = """\
Context:
{context}
Question:
{question}
"""
user_role_prompt = UserRolePrompt(user_prompt_template)
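# Note: based on their use in the pipeline below, create_message() fills the
# {context} and {question} placeholders and returns a role-tagged message,
# e.g. user_role_prompt.create_message(question="...", context="...").
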
class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    async def arun_pipeline(self, user_query: str):
        # Retrieve the top 3 matching contexts (reduced from 6 to keep prompts short)
        context_list = self.vector_db_retriever.search_by_text(user_query, k=3)

        # Cap the total context at roughly 3,000 tokens, i.e. about 12,000
        # characters at the usual ~4-characters-per-token estimate
        context_prompt = ""
        total_length = 0
        max_length = 12000  # reduced from 24000
        for context in context_list:
            if total_length + len(context[0]) > max_length:
                break
            context_prompt += context[0] + "\n"
            total_length += len(context[0])

        print(f"Using {len(context_prompt.split())} words of context")

        formatted_system_prompt = system_role_prompt.create_message()
        formatted_user_prompt = user_role_prompt.create_message(question=user_query, context=context_prompt)

        # Stream the model's answer token by token
        async def generate_response():
            async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
                yield chunk

        return {"response": generate_response(), "context": context_list}
text_splitter = CharacterTextSplitter()
def process_file(file: AskFileResponse):
    import tempfile
    import shutil

    print(f"Processing file: {file.name}")

    # Create a temporary file with the correct extension
    suffix = f".{file.name.split('.')[-1]}"
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
        # Copy the uploaded file content to the temporary file
        shutil.copyfile(file.path, temp_file.name)
        print(f"Created temporary file at: {temp_file.name}")

        # Create the appropriate loader for the file type
        if file.name.lower().endswith('.pdf'):
            loader = PDFLoader(temp_file.name)
        else:
            loader = TextFileLoader(temp_file.name)

        try:
            # Load and split the documents into chunks
            documents = loader.load_documents()
            texts = text_splitter.split_texts(documents)
            return texts
        finally:
            # Clean up the temporary file
            try:
                os.unlink(temp_file.name)
            except Exception as e:
                print(f"Error cleaning up temporary file: {e}")
@cl.on_chat_start
async def on_chat_start():
    files = None

    # Wait for the user to upload a file
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a Text or PDF file to begin!",
            accept=["text/plain", "application/pdf"],
            max_size_mb=2,
            timeout=180,
        ).send()

    file = files[0]
    print(f"Received file: {file.name} ({file.type})")

    msg = cl.Message(content=f"Processing `{file.name}`...")
    await msg.send()

    # Load and chunk the file
    try:
        texts = process_file(file)
        print(f"Successfully processed file. Generated {len(texts)} text chunks")
        print("Sample of first chunk:", texts[0][:200] if texts else "No texts generated")
    except Exception as e:
        print(f"Error processing file: {str(e)}")
        await cl.Message(content=f"Error processing file: {str(e)}").send()
        return

    # Build the in-memory vector store from the chunks
    try:
        vector_db = VectorDatabase()
        vector_db = await vector_db.abuild_from_list(texts)
        print("Successfully created vector database")
    except Exception as e:
        print(f"Error creating vector database: {str(e)}")
        await cl.Message(content=f"Error creating vector database: {str(e)}").send()
        return

    try:
        chat_openai = ChatOpenAI()
        print("Successfully initialized ChatOpenAI")
    except Exception as e:
        print(f"Error initializing ChatOpenAI: {str(e)}")
        await cl.Message(content=f"Error initializing ChatOpenAI: {str(e)}").send()
        return

    # Assemble the retrieval-augmented QA pipeline
    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
        vector_db_retriever=vector_db,
        llm=chat_openai,
    )

    # Let the user know that the system is ready
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)
@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")
    if not chain:
        await cl.Message(content="Error: Chat session not initialized. Please try uploading the file again.").send()
        return

    msg = cl.Message(content="")

    try:
        result = await chain.arun_pipeline(message.content)
        print(f"Retrieved {len(result['context'])} relevant contexts")

        # Stream tokens back to the UI as they arrive
        async for stream_resp in result["response"]:
            await msg.stream_token(stream_resp)
        await msg.send()
    except Exception as e:
        print(f"Error in chat pipeline: {str(e)}")
        await cl.Message(content=f"Error processing your question: {str(e)}").send()
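
# Run locally with the Chainlit CLI (assuming this file is saved as app.py):
#   chainlit run app.py -w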