Spaces: Runtime error
Update app.py
app.py CHANGED
```diff
@@ -2,19 +2,43 @@ import gradio as gr
 from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 import json
 
+# Define hyperparameters
+learning_rate = 3e-5
+batch_size = 16
+epochs = 3
+max_seq_length = 512
+warmup_steps = 100
+weight_decay = 0.01
+dropout_prob = 0.1
+gradient_clip_value = 1.0
+
 context_val = ''
 
 q_n_a_model_name = "deepset/roberta-base-squad2"
 q_n_a_model = AutoModelForQuestionAnswering.from_pretrained(q_n_a_model_name)
-tokenizer = AutoTokenizer.from_pretrained(q_n_a_model_name)
+tokenizer = AutoTokenizer.from_pretrained(q_n_a_model_name)
 
 context = gr.Textbox(label="Add the Context (Paragraph or texts) for which you want to get insights", lines=10, outputs="text")
 
 def q_n_a_fn(context, text):
     QA_input = {'question': text, 'context': context}
     nlp = pipeline('question-answering', model=q_n_a_model, tokenizer=tokenizer)
-
-
+
+    # Set the device (CPU or GPU)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    q_n_a_model.to(device)
+
+    # Convert inputs to tensors
+    inputs = tokenizer(QA_input["context"], QA_input["question"], return_tensors="pt", max_length=max_seq_length, truncation=True, padding="max_length").to(device)
+
+    # Get predictions
+    with torch.no_grad():
+        outputs = q_n_a_model(**inputs)
+
+    # Decode and return the answer
+    start_idx, end_idx = torch.argmax(outputs.start_logits), torch.argmax(outputs.end_logits)
+    answer = tokenizer.decode(inputs["input_ids"][0][start_idx:end_idx+1])
+
     return answer
 
 def classification_fn(text):
@@ -51,4 +75,4 @@ with gr.Blocks(theme='gradio/soft') as demo:
     gr.Interface(fn=classification_fn, inputs=[context], outputs="text")
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
```
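The committed change would still fail at question time: `q_n_a_fn` now calls `torch.device`, `torch.no_grad`, and `torch.argmax`, but app.py only imports gradio, transformers, and json, so the first request raises `NameError: name 'torch' is not defined`, which is consistent with the Space's "Runtime error" status. Two smaller issues: the tokenizer call passes the context before the question, while extractive QA checkpoints such as deepset/roberta-base-squad2 expect `(question, context)` order, and the `nlp` pipeline rebuilt on every call is never used. Below is a minimal corrected sketch of the question-answering path, assuming the rest of app.py stays as in the diff; hoisting the device setup to module level is an editorial choice, since repeating it per request only wastes work.

```python
# Minimal corrected sketch of the question-answering path, assuming the rest
# of app.py is unchanged. It adds the missing `import torch`, feeds the
# tokenizer (question, context) in that order, strips special tokens when
# decoding, and does the device setup once at module load instead of per call.
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

q_n_a_model_name = "deepset/roberta-base-squad2"
q_n_a_model = AutoModelForQuestionAnswering.from_pretrained(q_n_a_model_name)
tokenizer = AutoTokenizer.from_pretrained(q_n_a_model_name)

max_seq_length = 512  # the only hyperparameter the inference path actually uses

# Set the device (CPU or GPU) once, not on every request
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
q_n_a_model.to(device)
q_n_a_model.eval()

def q_n_a_fn(context, text):
    # Question first, then context: the pair order extractive QA models expect
    inputs = tokenizer(
        text,
        context,
        return_tensors="pt",
        max_length=max_seq_length,
        truncation=True,
        padding="max_length",
    ).to(device)

    # Forward pass without gradient tracking (inference only)
    with torch.no_grad():
        outputs = q_n_a_model(**inputs)

    # Most likely start/end token positions, then decode that span
    start_idx = torch.argmax(outputs.start_logits)
    end_idx = torch.argmax(outputs.end_logits)
    answer = tokenizer.decode(
        inputs["input_ids"][0][start_idx : end_idx + 1],
        skip_special_tokens=True,
    )
    return answer
```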
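Two further notes, offered as observations rather than fixes the commit makes: the new hyperparameter block (`learning_rate`, `batch_size`, `epochs`, `warmup_steps`, `weight_decay`, `dropout_prob`, `gradient_clip_value`) holds training-time settings that this inference-only app never reads, `max_seq_length` aside; and the `outputs="text"` keyword on `gr.Textbox` looks intended for `gr.Interface`, so depending on the Gradio version it is ignored with a warning or rejected. Since the function already builds a question-answering pipeline, the whole manual tensor path could also collapse to the sketch below, reusing `q_n_a_model` and `tokenizer` from above and letting the pipeline handle device placement, truncation, and span decoding.

```python
import torch
from transformers import pipeline

# Alternative sketch: let the pipeline the diff already constructs do the
# tokenization and start/end decoding; `device` selects GPU 0 when available.
nlp = pipeline(
    "question-answering",
    model=q_n_a_model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)

def q_n_a_fn(context, text):
    # The pipeline returns a dict like {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
    return nlp(question=text, context=context)["answer"]
```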