akashdhruv committed
Commit f380b3d · verified · 1 Parent(s): bfc70ee

Update app.py

Files changed (1)
  1. app.py +2 -15
app.py CHANGED
@@ -1,28 +1,15 @@
  import gradio as gr
- import os
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

  # Load the fine-tuned model and tokenizer
  model = AutoModelForSeq2SeqLM.from_pretrained("Codellama-7b-Instruct")
  tokenizer = AutoTokenizer.from_pretrained("Codellama-7b-Instruct")

- # Define a variable to store the chat transcript
- chat_transcript = ""
-
- # Check if the chat transcript file exists
- if not os.path.exists("chat_transcript.txt"):
-     # If the file doesn't exist, create it
-     with open("chat_transcript.txt", "w") as f:
-         pass
-
- # Define a function to generate a response from the model and save the chat transcript
+ # Define a function to generate a response from the model
  def generate_response(input_text):
-     nonlocal chat_transcript
      inputs = tokenizer(input_text, return_tensors="pt")
      outputs = model.generate(**inputs)
      response = tokenizer.decode(outputs[0])
-     chat_transcript += f"User: {input_text}\nAssistant: {response}\n\n"
-     with open("chat_transcript.txt", "a") as f:
-         f.write(chat_transcript)
      return response

  # Create a Gradio interface
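The hunk ends at the "# Create a Gradio interface" comment, so the interface wiring itself is not part of this diff. For reference only, a minimal sketch of how the resulting app.py is typically completed: the gr.Interface call, its input/output types, and the launch() guard are assumptions, not part of the commit, while the model id and the generate_response body are copied from the diff verbatim. Note as an aside that published CodeLlama checkpoints on the Hub are decoder-only causal LMs usually loaded via AutoModelForCausalLM; the sketch keeps the commit's AutoModelForSeq2SeqLM call unchanged.

# app.py (sketch): everything above the final comment matches the post-commit
# file; the gr.Interface wiring below is an assumed completion, not the commit's.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned model and tokenizer (model id taken from the commit)
model = AutoModelForSeq2SeqLM.from_pretrained("Codellama-7b-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Codellama-7b-Instruct")

# Define a function to generate a response from the model
def generate_response(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs)
    response = tokenizer.decode(outputs[0])
    return response

# Create a Gradio interface (assumed: one text box in, one text box out)
demo = gr.Interface(fn=generate_response, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()

Run locally with "python app.py" to serve the text-in/text-out UI; on a Space the file is executed directly, so the same guard fires there as well.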