muhammadshaheryar committed
Commit 8f95f35 · verified · 1 Parent(s): c388f52

Create app.py

Files changed (1): app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ # Retrieval-based QA demo: embed pasted text with Sentence Transformers, index it in FAISS, and answer questions with an extractive QA model
+ from langchain.chains import RetrievalQA
+ from langchain.document_loaders import TextLoader
+ from langchain.embeddings import SentenceTransformerEmbeddings
+ from langchain.vectorstores import FAISS
+ from transformers import pipeline
+
+
+
+ # Paste your data here
+ data = """
+ Enter your text data here. For example:
+ """
+
+ # Split data into chunks for embedding
+ def chunk_text(text, chunk_size=500):
+     words = text.split()
+     chunks = [" ".join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
+     return chunks
+
+ # Prepare the text chunks
+ text_chunks = chunk_text(data)
+
+ # Generate embeddings and index the data
+ embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+ vectorstore = FAISS.from_texts(text_chunks, embeddings)
+
+ # Load an extractive question-answering model from Hugging Face
+ from transformers import pipeline
+ qa_pipeline = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad")
+
+ # Define a function to perform QA
+ def answer_question(question):
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+     relevant_docs = retriever.get_relevant_documents(question)
+     context = " ".join([doc.page_content for doc in relevant_docs])
+     answer = qa_pipeline(question=question, context=context)
+     return answer["answer"]
+
+ # Ask a question
+ print("Ask a question about the pasted text.")
+ question = input("Your question: ")
+ answer = answer_question(question)
+ print("Answer:", answer)