aaliyaan committed on
Commit
ddf2105
·
1 Parent(s): 4451a94
Files changed (1) hide show
  1. app.py +7 -11
app.py CHANGED
@@ -6,16 +6,12 @@ from PyPDF2 import PdfReader
6
  # Models and Tokenizers Setup
7
  models = {
8
  "Text Generator (Bloom)": {
9
- "model": AutoModelForCausalLM.from_pretrained(
10
- "bigscience/bloom-560m",
11
- device_map="auto",
12
- torch_dtype="auto"
13
- ),
14
  "tokenizer": AutoTokenizer.from_pretrained("bigscience/bloom-560m"),
15
  },
16
  "PDF Summarizer (T5)": {
17
- "model": AutoModelForSeq2SeqLM.from_pretrained("t5-small"),
18
- "tokenizer": AutoTokenizer.from_pretrained("t5-small", use_fast=False),
19
  },
20
  "Broken Answer (T0pp)": {
21
  "model": AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp"),
@@ -37,11 +33,11 @@ def chat_with_model(model_choice, user_message, chat_history, file=None):
37
  model = model_info["model"]
38
 
39
  # Tokenize Input
40
- inputs = tokenizer(user_message, return_tensors="pt", truncation=True, max_length=256)
41
 
42
  # Adjust max_length and parameters for the PDF summarizer
43
- max_length = 100
44
- num_beams = 3
45
  outputs = model.generate(
46
  **inputs,
47
  max_length=max_length,
@@ -117,4 +113,4 @@ def create_chat_interface():
117
 
118
  if __name__ == "__main__":
119
  interface = create_chat_interface()
120
- interface.launch(server_name="0.0.0.0", server_port=7860)
 
6
  # Models and Tokenizers Setup
7
  models = {
8
  "Text Generator (Bloom)": {
9
+ "model": AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m"),
 
 
 
 
10
  "tokenizer": AutoTokenizer.from_pretrained("bigscience/bloom-560m"),
11
  },
12
  "PDF Summarizer (T5)": {
13
+ "model": AutoModelForSeq2SeqLM.from_pretrained("aaliyaan/t5-small-finetuned-career"),
14
+ "tokenizer": AutoTokenizer.from_pretrained("aaliyaan/t5-small-finetuned-career", use_fast=False),
15
  },
16
  "Broken Answer (T0pp)": {
17
  "model": AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp"),
 
33
  model = model_info["model"]
34
 
35
  # Tokenize Input
36
+ inputs = tokenizer(user_message, return_tensors="pt", padding=True, truncation=True, max_length=512)
37
 
38
  # Adjust max_length and parameters for the PDF summarizer
39
+ max_length = 150
40
+ num_beams = 5
41
  outputs = model.generate(
42
  **inputs,
43
  max_length=max_length,
 
113
 
114
  if __name__ == "__main__":
115
  interface = create_chat_interface()
116
+ interface.launch(server_name="0.0.0.0", server_port=7860)