Update app.py
app.py CHANGED
@@ -3,8 +3,11 @@ from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration
 from datasets import load_dataset
 from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification
 
-# Load
-
+# Load a multilingual dataset (use "xnli" or "tydi_qa")
+try:
+    dataset = load_dataset("xnli", "en", split="validation")  # Using English subset as an example
+except Exception as e:
+    st.error(f"Error loading the dataset: {e}")
 
 # Initialize tokenizer and retriever for multilingual support (using XLM-Roberta)
 tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
@@ -37,6 +40,3 @@ if user_query:
     st.write(f"Answer: {answer}")
 
     # Display the most relevant documents
-    st.subheader("Relevant Documents:")
-    for doc in retrieved_docs:
-        st.write(doc['text'][:300] + '...')  # Display first 300 characters of each doc
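For context, here is a minimal sketch of how the changed dataset-loading block might sit inside app.py after this commit. Only the lines shown in the diff are from the source; the model initialization, the st.text_input label, and the placeholder answer below are assumptions, since the RAG retrieval/generation code referenced in the hunk header is not part of this diff.

# Minimal sketch of app.py after this commit (assumptions noted inline).
import streamlit as st
from datasets import load_dataset
from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification

# Load a multilingual dataset (use "xnli" or "tydi_qa")
try:
    dataset = load_dataset("xnli", "en", split="validation")  # Using English subset as an example
except Exception as e:
    st.error(f"Error loading the dataset: {e}")
    dataset = None

# Initialize tokenizer and retriever for multilingual support (using XLM-Roberta)
tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaForSequenceClassification.from_pretrained("xlm-roberta-base")  # assumption: mirrors the import above

user_query = st.text_input("Ask a question:")  # assumption: the exact prompt label is not in the diff
if user_query:
    inputs = tokenizer(user_query, return_tensors="pt")
    # Placeholder: the real app generates the answer with the RAG components
    # imported at the top of the file, which this diff does not show.
    answer = f"(placeholder) query tokenized into {inputs['input_ids'].shape[1]} tokens"
    st.write(f"Answer: {answer}")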