Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import os
 import re
 from polyglot.detect import Detector
-
+
 
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
@@ -21,12 +21,6 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto")
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
 
-# Simulated integration of BhasaAnuvaad dataset from paper: https://huggingface.co/papers/2411.04699
-dataset = load_bhasa_dataset()
-sample_pair = dataset[0]
-print(f"Example from dataset - Source: {sample_pair['source_text']}, Target: {sample_pair['target_text']}")
-
-
 def lang_detector(text):
     min_chars = 5
     if len(text) < min_chars:
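For reference, the removed block called load_bhasa_dataset(), which is not defined or imported anywhere in the file shown, so the "simulated integration" could not have run as written. A genuine load from the Hub might look roughly like this minimal sketch; the dataset id, split, and column handling are assumptions based on the linked paper, not anything in this commit:

from datasets import load_dataset

# ASSUMPTION: dataset id is a guess; BhasaAnuvaad ships multiple subsets,
# so a config name may be required -- check the dataset card linked from
# https://huggingface.co/papers/2411.04699 for the exact ids.
dataset = load_dataset("ai4bharat/BhasaAnuvaad", split="train", streaming=True)
sample_pair = next(iter(dataset))
print(sample_pair)  # inspect the real column names before indexing into the example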
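The second hunk's context ends mid-function, but the polyglot.detect.Detector import at the top of app.py indicates how lang_detector presumably continues. A minimal sketch of a polyglot-based detector, assuming the function returns a human-readable language name with fallback messages for short or unclassifiable input (the return strings are illustrative, not from this commit):

from polyglot.detect import Detector

def lang_detector(text):
    # Very short strings are unreliable to classify; bail out early.
    min_chars = 5
    if len(text) < min_chars:
        return "Input text too short"
    try:
        # quiet=True stops polyglot from raising on low-confidence input.
        return Detector(text, quiet=True).language.name
    except Exception:
        return "Unable to detect language"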