BorderCollieWei commited on
Commit
befb81c
verified
1 Parent(s): 64960bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -7,13 +7,12 @@ Original file is located at
7
  https://colab.research.google.com/drive/1zRuAxGm_11lNIeBxFlHVzc5tNKhyLef4
8
  """
9
  import gradio as gr
10
- import os
11
  # 加載 LLaMA 模型 (Load the LLaMA model)
12
  from transformers import AutoTokenizer, AutoModelForCausalLM
13
- token = os.getenv("Git Access")
14
 
15
- tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf",use_auth_token=token)
16
- model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf",use_auth_token=token)
17
 
18
  # 定義推理函數 (Define the inference function)
19
  def generate_text(prompt):
@@ -32,8 +31,8 @@ interface = gr.Interface(
32
  fn=generate_text,
33
  inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
34
  outputs="text",
35
- title="LLaMA Text Generator",
36
- description="Generate text using LLaMA 2 models hosted on Hugging Face Spaces."
37
  )
38
 
39
  # 啟動應用 (Launch the app)
 
7
  https://colab.research.google.com/drive/1zRuAxGm_11lNIeBxFlHVzc5tNKhyLef4
8
  """
9
  import gradio as gr
 
10
  # 加載 T5 模型 (Load the T5 model)
11
  from transformers import AutoTokenizer, AutoModelForCausalLM
12
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
13
 
14
+ tokenizer = AutoTokenizer.from_pretrained("google/t5-v1_1-xxl")
15
+ model = AutoModelForSeq2SeqLM.from_pretrained("google/t5-v1_1-xxl")
16
 
17
  # 定義推理函數 (Define the inference function)
18
  def generate_text(prompt):
 
31
  fn=generate_text,
32
  inputs=gr.Textbox(lines=5, placeholder="Enter your prompt here..."),
33
  outputs="text",
34
+ title="T5 Text Generator",
35
+ description="Generate text using the T5 v1.1 XXL model hosted on Hugging Face Spaces."
36
  )
37
 
38
  # 啟動應用 (Launch the app)