Gervacius committed on
Commit
043c5c1
·
verified ·
1 Parent(s): fc6062c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -5
app.py CHANGED
@@ -1,10 +1,16 @@
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
 
4
- # Load the model and tokenizer
 
 
 
5
  model_name = "meta-llama/Llama-3.1-8B-Instruct"
6
- tokenizer = AutoTokenizer.from_pretrained(model_name)
7
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
8
 
9
  def predict(input_text):
10
  # Tokenize input and generate text
@@ -17,8 +23,8 @@ interface = gr.Interface(
17
  fn=predict,
18
  inputs=gr.Textbox(label="Input Text"),
19
  outputs=gr.Textbox(label="Generated Output"),
20
- title="Phi-4 Model",
21
- description="Generate text using the microsoft/phi-4 model."
22
  )
23
 
24
  # Launch the interface
 
1
+ import os
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import gradio as gr
4
 
5
+ # Get the Hugging Face token from the environment variable
6
+ hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN")
7
+
8
+ # Model name
9
  model_name = "meta-llama/Llama-3.1-8B-Instruct"
10
+
11
+ # Load the model and tokenizer with the token
12
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_token)
13
+ model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=hf_token)
14
 
15
  def predict(input_text):
16
  # Tokenize input and generate text
 
23
  fn=predict,
24
  inputs=gr.Textbox(label="Input Text"),
25
  outputs=gr.Textbox(label="Generated Output"),
26
+ title="Meta-LLaMA-3.1-8B-Instruct",
27
+ description="Generate text using the meta-llama/Llama-3.1-8B-Instruct model."
28
  )
29
 
30
  # Launch the interface