Commit 2b864f4 · Parent: 22b32b3
Microsoft model
app.py CHANGED
@@ -2,8 +2,8 @@ import gradio as gr
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer using Hugging Face
-
-model_name = "KingNish/Qwen2.5-0.5b-Test-ft"
+model_name = "microsoft/Phi-3-mini-4k-instruct"
+#model_name = "KingNish/Qwen2.5-0.5b-Test-ft"
 
 
 # Explicitly load the tokenizer and model
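The code behind "# Explicitly load the tokenizer and model" sits outside this hunk, so the actual loading lines are not shown. A minimal sketch of what they plausibly look like, given the pipeline, AutoTokenizer, and AutoModelForCausalLM imports and the chatbot name used in the next hunk; the exact arguments are assumptions, not the commit's code:

from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

model_name = "microsoft/Phi-3-mini-4k-instruct"

# Explicit load, mirroring the comment above. Extra kwargs such as
# torch_dtype, device_map, or trust_remote_code may be needed depending
# on the transformers version; they are omitted here as unknowns.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# A text-generation pipeline, matching how chatbot(...) is called below.
chatbot = pipeline("text-generation", model=model, tokenizer=tokenizer)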
@@ -23,8 +23,9 @@ def respond(
     top_p,
 ):
     # Combine system message and conversation history
-    prompt = system_message + "\n"
-    prompt += f"User: {message}\n\nBot:"
+    prompt=message
+    #prompt = system_message + "\n"
+    #prompt += f"User: {message}\n\nBot:"
 
     # Generate the response using the model
     response = chatbot(prompt, max_length=max_tokens, temperature=temperature, top_p=top_p)[0]['generated_text']
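Pieced together, the respond handler after this commit reduces to the sketch below. The parameters above top_p are cut off by the hunk, so message, history, and system_message are assumed from Gradio's ChatInterface convention, and the trailing return is assumed as well. Two caveats worth noting: temperature and top_p only take effect when sampling is enabled, and max_length caps prompt plus completion (max_new_tokens is the usual way to cap only the reply).

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # After this commit the prompt is the raw user message; the old
    # "User: ... Bot:" framing is left commented out.
    prompt = message

    # The pipeline returns a list of dicts; keep the generated text.
    # do_sample=True would be required for temperature/top_p to matter.
    response = chatbot(prompt, max_length=max_tokens, temperature=temperature,
                       top_p=top_p)[0]['generated_text']
    return response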
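One caveat with prompt=message: microsoft/Phi-3-mini-4k-instruct is an instruct-tuned chat model, so raw text will generate but skips the chat formatting the model was trained on. If the hand-rolled framing stays commented out, the tokenizer's built-in chat template is the idiomatic substitute; a sketch, not part of this commit:

# Format the system and user turns with the model's own chat template.
messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": message},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)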