Update app.py
app.py CHANGED
@@ -26,8 +26,8 @@ logger = logging.getLogger(__name__)
 embedding_function = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 
 # Definition of the list of LLM models
-list_llm_simple = ["
-list_llm = ["
+list_llm_simple = ["meta/llama-7b-hf", "meta/llama-7b-hf"]
+list_llm = ["meta/llama-7b-hf", "meta/llama-7b-hf"]
 
 def initialize_database(document, chunk_size, chunk_overlap, progress=gr.Progress()):
     logger.info("Initializing database...")
@@ -65,11 +65,11 @@ def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, pr
 
     # Definition of the LLM model
     if language == "italiano":
-        model = AutoModelForCausalLM.from_pretrained("
+        model = AutoModelForCausalLM.from_pretrained("meta/llama-7b-hf")
     else:
-        model = AutoModelForCausalLM.from_pretrained("
+        model = AutoModelForCausalLM.from_pretrained("meta/llama-7b-hf")
 
-    tokenizer = AutoTokenizer.from_pretrained("
+    tokenizer = AutoTokenizer.from_pretrained("meta/llama-7b-hf" if language == "italiano" else "meta/llama-7b-hf")
 
     qa_chain = ConversationalRetrievalChain.from_llm(
         llm=model,
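For context on the first hunk: initialize_database appears here only as a signature, next to the module-level embedding_function it pairs with. The following is a minimal, hypothetical sketch of how a vector_db is commonly built from an uploaded document with these pieces; only the embedding model and the chunk_size/chunk_overlap parameters come from the diff, while PyPDFLoader, RecursiveCharacterTextSplitter, Chroma, and the build_vector_db helper are assumptions for illustration.

    # Hypothetical sketch of a document-to-vector-store pipeline; only the
    # embedding model and the chunk_size/chunk_overlap parameters come from
    # the diff, the loader/splitter/store choices are assumptions.
    from langchain_community.document_loaders import PyPDFLoader
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import Chroma
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    embedding_function = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

    def build_vector_db(document_path, chunk_size, chunk_overlap):
        # Load the uploaded file and split it into overlapping chunks.
        pages = PyPDFLoader(document_path).load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        chunks = splitter.split_documents(pages)
        # Embed the chunks and index them in a Chroma store.
        return Chroma.from_documents(documents=chunks, embedding=embedding_function)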
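For the second hunk: ConversationalRetrievalChain.from_llm expects a LangChain language-model object, so a raw AutoModelForCausalLM is usually wrapped in a transformers text-generation pipeline via HuggingFacePipeline before being passed in. Below is a minimal sketch of that wiring, not the Space's actual code; the model id is taken verbatim from the diff, the llm_temperature, max_tokens, top_k, and vector_db names mirror the initialize_LLM signature in the hunk header, and build_qa_chain is a hypothetical helper.

    # Minimal sketch (not the Space's code) of wiring a transformers model into
    # ConversationalRetrievalChain; parameter names mirror the initialize_LLM
    # signature from the diff, and the model id is the one the diff introduces.
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
    from langchain_community.llms import HuggingFacePipeline
    from langchain.chains import ConversationalRetrievalChain

    def build_qa_chain(llm_temperature, max_tokens, top_k, vector_db):
        model_id = "meta/llama-7b-hf"  # repo id taken verbatim from the diff
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id)

        # Wrap model + tokenizer in a text-generation pipeline so LangChain can call it.
        generator = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=max_tokens,
            temperature=llm_temperature,
            do_sample=True,
        )
        llm = HuggingFacePipeline(pipeline=generator)

        # Retrieve the top_k most similar chunks from the vector store per question.
        return ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=vector_db.as_retriever(search_kwargs={"k": top_k}),
            return_source_documents=True,
        )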