metastable-void committed
Commit b0097b1 · unverified · 1 parent: 9d7e24a
Files changed (1): app.py (+4, −5)
app.py CHANGED
@@ -25,11 +25,11 @@ if torch.cuda.is_available():
 base_model_id = "llm-jp/llm-jp-3-1.8b-instruct"
 tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
 tokenizer.chat_template = "{{bos_token}}{% for message in messages %}{% if message['role'] == 'user' %}{{ '\\n\\n### 前の投稿:\\n' + message['content'] + '' }}{% elif message['role'] == 'system' %}{{ '以下は、SNS上の投稿です。あなたはSNSの投稿生成botとして、次に続く投稿を考えなさい。説明はせず、投稿の内容のみを鉤括弧をつけずに答えよ。' }}{% elif message['role'] == 'assistant' %}{{ '\\n\\n### 次の投稿:\\n' + message['content'] + eos_token }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '\\n\\n### 次の投稿:\\n' }}{% endif %}{% endfor %}"
-base_model = AutoModelForCausalLM.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
     base_model_id,
     trust_remote_code=True,
 )
-model = PeftModel.from_pretrained(base_model, model_id, trust_remote_code=True)
+model.load_adapter(model_id)
 my_pipeline=pipeline(
     task="text-generation",
     model=model,
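
This first hunk swaps peft's explicit wrapper, PeftModel.from_pretrained(base_model, model_id, ...), for transformers' built-in PEFT integration: the base model is loaded as a plain AutoModelForCausalLM and the LoRA adapter is attached in place with model.load_adapter(model_id). For context, the unchanged chat template is a Japanese prompt that reads roughly: "The following are posts on an SNS. As an SNS post-generation bot, think of the post that comes next. Do not explain; answer with only the post content, without quotation brackets", with "### 前の投稿" / "### 次の投稿" marking the previous and next post. A minimal sketch of the two loading styles, assuming model_id names a PEFT adapter repo as it does elsewhere in app.py (the placeholder value below is hypothetical):

# Requires the peft package to be installed; transformers then exposes
# load_adapter() on regular models through its PEFT integration.
from transformers import AutoModelForCausalLM

base_model_id = "llm-jp/llm-jp-3-1.8b-instruct"
model_id = "someuser/some-lora-adapter"  # hypothetical placeholder

# Old style (removed): wrap the base model in a PeftModel.
#   from peft import PeftModel
#   base = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
#   model = PeftModel.from_pretrained(base, model_id)

# New style (added): load the adapter into the model in place, keeping
# model as a regular transformers model with the adapter active.
model = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
model.load_adapter(model_id)
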
@@ -52,12 +52,11 @@ def generate(
         {"role": "user", "content": message},
     ]
 
-    t = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
     output = my_pipeline(
-        t,
+        messages,
     )
     print(output)
-    yield output[-1]["generated_text"]
+    yield output[-1]["generated_text"][-1]["content"]
 
 demo = gr.ChatInterface(
     fn=generate,
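
The second hunk drops the manual tokenizer.apply_chat_template(...) step and hands the message list straight to the pipeline: recent transformers text-generation pipelines detect chat-format input and apply the tokenizer's chat template themselves. The output shape changes to match: with a plain string input, generated_text is a string, but with chat input it is the full conversation with the model's reply appended, hence the new yield of output[-1]["generated_text"][-1]["content"]. A minimal sketch of that call path (the model name mirrors app.py; the message contents are placeholders):

from transformers import pipeline

pipe = pipeline(
    task="text-generation",
    model="llm-jp/llm-jp-3-1.8b-instruct",
    trust_remote_code=True,
)
messages = [
    {"role": "system", "content": "system prompt here"},
    {"role": "user", "content": "previous post here"},
]
# The pipeline renders `messages` with the tokenizer's chat template
# internally, replacing the old tokenize=False / add_generation_prompt step.
output = pipe(messages)
# With chat input, "generated_text" holds the whole conversation, so the
# assistant's new message is the last entry's "content".
print(output[-1]["generated_text"][-1]["content"])
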