stivenDR14 committed
Commit · 4455476
Parent(s): daf9af3

update models

Files changed:
- app.py (+1, -1)
- pdf_processor.py (+1, -3)
- utils.py (+2, -2)
app.py CHANGED
@@ -6,7 +6,7 @@ class PDFProcessorUI:
     def __init__(self):
         self.processor = PDFProcessor()
         self.current_language = "Español"
-        self.current_ai_model = "Huggingface / Microsoft Phi
+        self.current_ai_model = "Huggingface / Microsoft Phi 4 mini Instruct"
         self.current_type_model = "Api Key"
 
     def change_language(self, language):
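
The new default name in app.py only works if it matches a key of AI_MODELS in utils.py, which is updated in the same commit. A minimal sketch of that consistency check, assuming app.py imports the mapping from utils; the assertion itself is illustrative and not part of this diff:

from utils import AI_MODELS  # mapping as defined in utils.py below

# The UI default set in PDFProcessorUI.__init__ after this commit.
DEFAULT_MODEL = "Huggingface / Microsoft Phi 4 mini Instruct"

# Illustrative check, not code from the repo: the default display name must
# be a key of AI_MODELS, otherwise the later repo-id lookup raises KeyError.
assert DEFAULT_MODEL in AI_MODELS
print(AI_MODELS[DEFAULT_MODEL])  # microsoft/Phi-4-mini-instruct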
pdf_processor.py CHANGED
@@ -95,7 +95,7 @@ def authenticate_watsonx(api_key):
 
 class PDFProcessor:
     def __init__(self):
-        self.language = "
+        self.language = "Español"
 
     def set_language(self, language):
         self.language = language
@@ -139,14 +139,12 @@ class PDFProcessor:
             current_llm = HuggingFaceEndpoint(
                 repo_id= AI_MODELS[ai_model],
                 temperature=0.2,
-                max_length=2048,
                 huggingfacehub_api_token=HUGGINGFACE_TOKEN,
             )
         else:
             current_llm = HuggingFaceEndpoint(
                 repo_id= AI_MODELS[ai_model],
                 temperature=0.2,
-                max_length=2048,
             )
         embeding_model = HuggingFaceEmbeddings(
             model_name="ibm-granite/granite-embedding-278m-multilingual",
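
Dropping max_length from both HuggingFaceEndpoint calls leaves the repo id, temperature, and (in the first branch) the API token as the only arguments. A minimal sketch of the trimmed call, assuming the langchain_huggingface package; the max_new_tokens argument is an assumed substitute for bounding output length and is not part of this diff:

from langchain_huggingface import HuggingFaceEndpoint

# Placeholders stand in for AI_MODELS[ai_model] and HUGGINGFACE_TOKEN from the diff.
current_llm = HuggingFaceEndpoint(
    repo_id="microsoft/Phi-4-mini-instruct",
    temperature=0.2,
    max_new_tokens=512,                  # assumed replacement for the removed max_length
    huggingfacehub_api_token="hf_xxx",   # placeholder token
)

print(current_llm.invoke("Summarize the uploaded PDF in one sentence."))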
utils.py CHANGED
@@ -8,7 +8,7 @@ ENVIRONMENT = os.getenv("ENVIRONMENT")
 
 if ENVIRONMENT == "dev":
     AI_MODELS = {
-        "Huggingface / Microsoft Phi
+        "Huggingface / Microsoft Phi 4 mini Instruct": "microsoft/Phi-4-mini-instruct",
         "Huggingface / Mistral Nemo Instruct": "mistralai/Mistral-Nemo-Instruct-2407",
         "Huggingface / Google Gemma 2 9B Instruct": "google/gemma-2-9b-it",
         "Huggingface / Meta Llama 3.1 8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
@@ -18,7 +18,7 @@ if ENVIRONMENT == "dev":
     }
 else:
     AI_MODELS = {
-        "Huggingface / Microsoft Phi
+        "Huggingface / Microsoft Phi 4 mini Instruct": "microsoft/Phi-4-mini-instruct",
         "Huggingface / Mistral Nemo Instruct": "mistralai/Mistral-Nemo-Instruct-2407",
         "Huggingface / Google Gemma 2 9B Instruct": "google/gemma-2-9b-it",
         "Huggingface / Meta Llama 3.1 8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
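
Both the dev and the non-dev AI_MODELS dictionaries gain the same Phi 4 entry, so the resolution from UI display name to Hugging Face repo id is identical in either environment. A minimal lookup sketch; the entries are copied from this commit, and the final print is illustrative only:

# Entries as added or kept by this commit (dev and non-dev branches match here).
AI_MODELS = {
    "Huggingface / Microsoft Phi 4 mini Instruct": "microsoft/Phi-4-mini-instruct",
    "Huggingface / Mistral Nemo Instruct": "mistralai/Mistral-Nemo-Instruct-2407",
    "Huggingface / Google Gemma 2 9B Instruct": "google/gemma-2-9b-it",
    "Huggingface / Meta Llama 3.1 8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
}

# Resolve the display name stored by the UI to the repo id expected by the endpoint.
repo_id = AI_MODELS["Huggingface / Microsoft Phi 4 mini Instruct"]
print(repo_id)  # microsoft/Phi-4-mini-instruct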