Mummia-99 committed on
Commit
550cfb9
·
verified ·
1 Parent(s): e8ac964

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -23
app.py CHANGED
@@ -1,24 +1,24 @@
1
- import streamlit as st
2
- from transformers import AutoModelForCausalLM, AutoTokenizer,pipeline
3
- import torch
4
-
5
- st.title("Text_Generator Fine tunning model")
6
-
7
- # Load model and tokenizer
8
- model_dir = "fine_tuned_model (1)"
9
- tokenizer = AutoTokenizer.from_pretrained(model_dir)
10
- model = AutoModelForCausalLM.from_pretrained(model_dir)
11
-
12
-
13
- code_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
14
-
15
- prompt = "def quicksort(arr):"
16
-
17
-
18
- inputs_text=st.text_input("Please enter the text",value="I think I really like this place. Ayesha and I had a chance to visit Cheuvront on a Monday night. It wasn\'t terribly busy when we arrived and we were warmly greeted. Unfortunately we were seated next to a loud group of young children that thought they knew something of the world ")
19
-
20
- if st.button("submit"):
21
- generated_code = code_generator(inputs_text, max_length=200, num_return_sequences=1)
22
-
23
- st.write(generated_code[0]["generated_text"])
24
 
 
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

# Streamlit app: generate text from a locally fine-tuned causal-LM checkpoint.
st.title("Text_Generator Fine tunning model")


@st.cache_resource
def load_generator(model_dir: str):
    """Load the fine-tuned model and tokenizer once and cache them.

    Without caching, Streamlit re-executes the whole script on every widget
    interaction, reloading the model from disk each time. ``st.cache_resource``
    keeps one shared pipeline instance across reruns and sessions.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForCausalLM.from_pretrained(model_dir)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


# Checkpoint directory as committed; the space/parens suggest an unzipped
# download — NOTE(review): consider renaming on disk, but kept as-is here.
code_generator = load_generator("fine_tuned_model (1)")

inputs_text = st.text_input("Please enter the text", value="def quicksort(arr):")

if st.button("submit"):
    # Guard: an empty prompt would make the pipeline generate from nothing.
    if not inputs_text.strip():
        st.warning("Please enter some text before submitting.")
    else:
        # max_length=200 counts prompt + generated tokens, matching original behavior.
        generated_code = code_generator(
            inputs_text, max_length=200, num_return_sequences=1
        )
        st.write(generated_code[0]["generated_text"])