Ghosthash committed
Commit 01108a5 · verified · 1 Parent(s): 6f37457

Update app.py

Files changed (1)
  1. app.py +29 -19
app.py CHANGED
@@ -1,24 +1,34 @@
- import streamlit as st
- from transformers import AutoModelForCausalLM, AutoTokenizer
 
- model_path = "cognitivecomputations/dolphin-2.8-mistral-7b-v02"
 
- tokenizer = AutoTokenizer.from_pretrained(model_path)
- model = AutoModelForCausalLM.from_pretrained(
-     model_path,
-     device_map="auto",
-     torch_dtype='auto'
- ).eval()
  text = st.text_input("enter text here")
 
  if text:
-     messages = [
-         {"role": "user", "content": text},
-     ]
-     input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
-     output_ids = model.generate(input_ids.to('cuda'))
-     response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
-     print(response)
-     st.json({
-         "response": response
-     })
+ # import streamlit as st
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
 
+ # model_path = "cognitivecomputations/dolphin-2.8-mistral-7b-v02"
 
+ # tokenizer = AutoTokenizer.from_pretrained(model_path)
+ # model = AutoModelForCausalLM.from_pretrained(
+ #     model_path,
+ #     device_map="auto",
+ #     torch_dtype='auto'
+ # ).eval()
+ # text = st.text_input("enter text here")
+
+ # if text:
+ #     messages = [
+ #         {"role": "user", "content": text},
+ #     ]
+ #     input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
+ #     output_ids = model.generate(input_ids.to('cuda'))
+ #     response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+ #     print(response)
+ #     st.json({
+ #         "response": response
+ #     })
+
+ from transformers import pipeline
+
+ pipe = pipeline("text-generation", model="cognitivecomputations/dolphin-2.8-mistral-7b-v02")
  text = st.text_input("enter text here")
 
  if text:
+     response = pipe(text)
+     st.json(response)
+
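
Note that the committed version of app.py calls st.text_input and st.json while leaving `import streamlit as st` commented out. A minimal runnable sketch of the pipeline-based app, assuming the Streamlit import is restored and adding a cached loader and device_map="auto" (none of which are in the commit itself), could look like this:

# Minimal sketch, not the committed file: the commit leaves `import streamlit as st`
# commented out while still calling st.text_input / st.json, so it is restored here.
import streamlit as st
from transformers import pipeline

@st.cache_resource  # assumption: cache the pipeline so reruns don't reload the 7B weights
def load_pipe():
    return pipeline(
        "text-generation",
        model="cognitivecomputations/dolphin-2.8-mistral-7b-v02",
        device_map="auto",  # assumption: carried over from the commented-out from_pretrained call
    )

pipe = load_pipe()

text = st.text_input("enter text here")

if text:
    # A text-generation pipeline returns a list of dicts with a "generated_text" key.
    response = pipe(text)
    st.json(response)

Wrapping pipeline() in an st.cache_resource loader keeps the model from being reloaded on every Streamlit rerun; device_map="auto" requires accelerate and is only an assumption here, since the commit relies on the pipeline defaults.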