KishoreK committed on
Commit
d06aa40
·
verified ·
1 Parent(s): b9d7b50

changing to llama 3.2 1B

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -3,14 +3,17 @@ from transformers import pipeline
3
  import os
4
 
5
  access_token = os.getenv("Token")
6
- pipe = pipeline("text-generation", model="google/gemma-2-2b-it", token=access_token)
7
 
8
- def predict(query):
9
- return pipe(query)
 
 
 
10
 
11
  gradio_app = gr.Interface(
12
  predict,
13
- inputs="text", outputs="text",
14
  title="Gemma 2B",
15
  )
16
 
 
3
  import os
4
 
5
  access_token = os.getenv("Token")
6
+ pipe = pipeline("text-generation", model="meta-llama/Llama-3.2-1B-Instruct", token=access_token)
7
 
8
def predict(system="you are a helpful assistant", query=""):
    """Generate a chat reply for *query*, steering the model with *system*.

    Parameters
    ----------
    system : str
        System prompt sent as the first chat message.
    query : str
        The user's question. Given a default so the signature is valid
        Python — the original `(system="...", query)` is a SyntaxError
        (non-default argument follows default argument), which prevented
        the app from starting at all.

    Returns
    -------
    str
        The assistant's reply text.
    """
    messages = [
        {"role": "system", "content": system},
        # Bug fix: the query must be sent with the "user" role; the original
        # sent two "system" messages, so the model never saw a user turn.
        {"role": "user", "content": query},
    ]
    # With chat-format input the pipeline returns the full transcript in
    # "generated_text"; the last entry is the assistant's message.
    reply = pipe(messages, max_new_tokens=256)[0]["generated_text"][-1]
    # The last entry is a message dict ({"role": ..., "content": ...});
    # return just the text so gradio's text output renders cleanly.
    return reply["content"] if isinstance(reply, dict) else reply
13
 
14
# Two text inputs (system prompt, user query) are mapped positionally onto
# predict's parameters; the reply is rendered as plain text.
gradio_app = gr.Interface(
    predict,
    inputs=["text", "text"],
    outputs="text",
    # Consistency fix: the title still said "Gemma 2B" after the backing
    # model was switched to Llama 3.2 1B.
    title="Llama 3.2 1B",
)
19