nishantsharma02 committed on
Commit
d8e0d61
·
1 Parent(s): 621fcdc

Model change

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -2,16 +2,16 @@ import gradio as gr
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
  # Load the model and tokenizer using Hugging Face
5
- #model_name = "microsoft/Phi-3-mini-4k-instruct"
6
 
7
 
8
  # Explicitly load the tokenizer and model
9
- #tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
10
- #model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
11
 
12
  # Create the pipeline
13
- chatbot = pipeline("text-generation", model="KingNish/Qwen2.5-0.5b-Test-ft", trust_remote_code=True)
14
- #chatbot = pipeline("text-generation", model=model, tokenizer=tokenizer, framework="pt")
15
 
16
  def respond(
17
  message,
 
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
  # Load the model and tokenizer using Hugging Face
5
+ model_name = "microsoft/Phi-3-mini-4k-instruct"
6
 
7
 
8
  # Explicitly load the tokenizer and model
9
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
10
+ model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
11
 
12
  # Create the pipeline
13
+ #chatbot = pipeline("text-generation", model="KingNish/Qwen2.5-0.5b-Test-ft", trust_remote_code=True)
14
+ chatbot = pipeline("text-generation", model=model, tokenizer=tokenizer, framework="pt")
15
 
16
  def respond(
17
  message,