Update app.py
app.py CHANGED
@@ -67,8 +67,7 @@ regularly used to recharge the Batcave s atomic pile . ''')
 
 st.caption('**Example 4**')
 st.caption(''':green[Claim:] :point_right: Amazon to hire 100K workers and until April Amazon will raise hourly wages by $2 due to pandemic demand.''')
-
-
+st.caption(''':green[Evidence:] :Due to the consumers increasingly relying on online retailers, Amazon planned to hire over 99,000 workers in the warehouse and delivery sector during the Pandemic in the USA.''')
 #-----------------------------------------------------------
 
 def proc():
@@ -353,7 +352,7 @@ def gen_qa_who(df):
         claim=df['claim'][i]
         answer= split_ws(df["who"])
         evidence=df["evidence"][i]
-
+        time.sleep(10)
         if srl!="":
             try:
                 for j in range(0,len(answer)):
@@ -368,7 +367,7 @@ def gen_qa_who(df):
                     list_of_ques_who.append(f"""Q{j+1}:{question_ids}""")
                     list_of_ans_who.append(f"""Ans{j+1}:{answer[j]}""")
                     input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
-
+                    time.sleep(10)
                     answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
                     if answer_evidence.lower() in evidence.lower():
                         list_of_evidence_answer_who.append(f"""Evidence{j+1}:{answer_evidence}""")
@@ -414,7 +413,7 @@ def gen_qa_what(df):
         claim=df['claim'][i]
         answer= split_ws(df["what"])
         evidence=df["evidence"][i]
-
+        time.sleep(10)
         if srl!="":
             try:
                 for j in range(0,len(answer)):
@@ -428,7 +427,7 @@ def gen_qa_what(df):
                     list_of_ques_what.append(f"""Q{j+1}:{question_ids}""")
                     list_of_ans_what.append(f"""Ans{j+1}:{answer[j]}""")
                     input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
-
+                    time.sleep(10)
                     answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
                     if answer_evidence.lower() in evidence.lower():
                         list_of_evidence_answer_what.append(f"""Evidence{j+1}:{answer_evidence}""")
@@ -476,7 +475,7 @@ def gen_qa_why(df):
         claim=df['claim'][i]
         answer= split_ws(df["why"])
         evidence=df["evidence"][i]
-
+        time.sleep(10)
         if srl!="":
             try:
                 for j in range(0,len(answer)):
@@ -490,7 +489,7 @@ def gen_qa_why(df):
                     list_of_ques_why.append(f"""Q{j+1}:{question_ids}""")
                     list_of_ans_why.append(f"""Ans{j+1}:{answer[j]}""")
                     input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
-
+                    time.sleep(10)
                     answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
                     if answer_evidence.lower() in evidence.lower():
                         list_of_evidence_answer_why.append(f"""Evidence{j+1}:{answer_evidence}""")
@@ -537,7 +536,7 @@ def gen_qa_when(df):
         claim=df['claim'][i]
         answer= split_ws(df["when"])
         evidence=df["evidence"][i]
-
+        time.sleep(10)
         if srl!="":
             try:
                 for j in range(0,len(answer)):
@@ -551,7 +550,7 @@ def gen_qa_when(df):
                     list_of_ques_when.append(f"""Q{j+1}:{question_ids}""")
                     list_of_ans_when.append(f"""Ans{j+1}:{answer[j]}""")
                     input_evidence = f"answer_the_next_question_from_context: {question_ids} context: {evidence}"
-
+                    time.sleep(10)
                     answer_evidence = query_evidence({"inputs":input_evidence,"truncation":True,"wait_for_model":True})[0]['generated_text']
                     if answer_evidence.lower() in evidence.lower():
                         list_of_evidence_answer_when.append(f"""Evidence{j+1}:{answer_evidence}""")
@@ -598,12 +597,12 @@ def gen_qa_where(df):
         claim=df['claim'][i]
         answer= split_ws(df["where"])
         evidence=df["evidence"][i]
-
+        time.sleep(10)
         if srl!="":
             try:
                 for j in range(0,len(answer)):
                     FACT_TO_GENERATE_QUESTION_FROM = f"""{answer[j]} [SEP] {claim}"""
-
+                    time.sleep(10)
                     question_ids = query({"inputs":FACT_TO_GENERATE_QUESTION_FROM,
                                           "num_beams":5,
                                           "early_stopping":True,