"""
run:
python -m relevancy run_all_day_paper \
--output_dir ./data \
--model_name="gpt-3.5-turbo-16k" \
"""
import time
import json
import os
import pprint
import random
import re
import string
from datetime import datetime
import numpy as np
import tqdm
import utils
def encode_prompt(query, prompt_papers):
"""Encode multiple prompt instructions into a single string."""
prompt = open("src/relevancy_prompt.txt").read() + "\n"
prompt += query['interest']
for idx, task_dict in enumerate(prompt_papers):
(title, authors, abstract, content) = task_dict["title"], task_dict["authors"], task_dict["abstract"], task_dict["content"]
        if not title:
            raise ValueError("every paper passed to encode_prompt must have a title")
prompt += f"###\n"
prompt += f"{idx + 1}. Title: {title}\n"
prompt += f"{idx + 1}. Authors: {authors}\n"
prompt += f"{idx + 1}. Abstract: {abstract}\n"
prompt += f"{idx + 1}. Content: {content}\n"
prompt += f"\n Generate response:\n1."
#print(prompt)
return prompt
def is_json(myjson):
    """Return True if the string parses as valid JSON, False otherwise."""
    try:
        json.loads(myjson)
    except Exception:
        return False
    return True
def post_process_chat_gpt_response(paper_data, response, threshold_score=7):
selected_data = []
    if response is None:
        return [], False
json_items = response['message']['content'].replace("\n\n", "\n").split("\n")
pattern = r"^\d+\. |\\"
    score_items = []
    try:
        for line in json_items:
            if is_json(line) and "relevancy score" in line.lower():
                score_items.append(json.loads(re.sub(pattern, "", line)))
    except Exception as e:
        # Show whatever looked like a score line to make the failure easier to debug.
        pprint.pprint([re.sub(pattern, "", line) for line in json_items if "relevancy score" in line.lower()])
        print(e)
        raise RuntimeError("failed to parse relevancy scores from the model response")
pprint.pprint(score_items)
scores = []
for item in score_items:
temp = item["Relevancy score"]
if isinstance(temp, str) and "/" in temp:
scores.append(int(temp.split("/")[0]))
else:
scores.append(int(temp))
if len(score_items) != len(paper_data):
score_items = score_items[:len(paper_data)]
hallucination = True
else:
hallucination = False
for idx, inst in enumerate(score_items):
        # skip papers that fall below the relevance threshold
if scores[idx] < threshold_score:
continue
output_str = "Subject: " + paper_data[idx]["subjects"] + "\n"
output_str += "Title: " + paper_data[idx]["title"] + "\n"
output_str += "Authors: " + paper_data[idx]["authors"] + "\n"
output_str += "Link: " + paper_data[idx]["main_page"] + "\n"
for key, value in inst.items():
paper_data[idx][key] = value
output_str += str(key) + ": " + str(value) + "\n"
paper_data[idx]['summarized_text'] = output_str
selected_data.append(paper_data[idx])
return selected_data, hallucination
def find_word_in_string(w, s):
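    """Return a regex match if the whole word `w` occurs in `s` (case-insensitive), else None."""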
return re.compile(r"\b({0})\b".format(w), flags=re.IGNORECASE).search(s)
def process_subject_fields(subjects):
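    """Split an arXiv subject string into bare category names.

    For example, "Computation and Language (cs.CL); Machine Learning (cs.LG)"
    becomes ["Computation and Language", "Machine Learning"].
    """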
all_subjects = subjects.split(";")
all_subjects = [s.split(" (")[0] for s in all_subjects]
return all_subjects
def generate_relevance_score(
all_papers,
query,
model_name="gpt-3.5-turbo-16k",
threshold_score=7,
num_paper_in_prompt=1,
temperature=0.4,
top_p=1.0,
sorting=True
):
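    """Ask the chat model to score each paper against the user's stated interest.

    Papers are sent in batches of `num_paper_in_prompt`. Returns a tuple
    (ans_data, hallucination): the papers scoring at or above `threshold_score`,
    and a flag that is set when the number of parsed scores did not match the
    number of papers in some batch.
    """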
ans_data = []
request_idx = 1
hallucination = False
    for batch_start in tqdm.tqdm(range(0, len(all_papers), num_paper_in_prompt)):
        prompt_papers = all_papers[batch_start:batch_start + num_paper_in_prompt]
prompt = encode_prompt(query, prompt_papers)
decoding_args = utils.OpenAIDecodingArguments(
temperature=temperature,
n=1,
            max_tokens=1024*num_paper_in_prompt,  # allow up to 1024 response tokens per paper
top_p=top_p,
)
request_start = time.time()
response = utils.openai_completion(
prompts=prompt,
model_name=model_name,
batch_size=1,
decoding_args=decoding_args,
logit_bias={"100257": -100}, # prevent the <|endoftext|> from being generated
)
print ("response", response['message']['content'])
request_duration = time.time() - request_start
process_start = time.time()
batch_data, hallu = post_process_chat_gpt_response(prompt_papers, response, threshold_score=threshold_score)
hallucination = hallucination or hallu
ans_data.extend(batch_data)
print(f"Request {request_idx+1} took {request_duration:.2f}s")
print(f"Post-processing took {time.time() - process_start:.2f}s")
if sorting:
        ans_data = sorted(ans_data, key=lambda x: int(str(x["Relevancy score"]).split("/")[0]), reverse=True)
return ans_data, hallucination
def run_all_day_paper(
query={"interest":"Computer Science", "subjects":["Machine Learning", "Computation and Language", "Artificial Intelligence", "Information Retrieval"]},
date=None,
data_dir="../data",
model_name="gpt-3.5-turbo-16k",
threshold_score=7,
num_paper_in_prompt=2,
temperature=0.4,
top_p=1.0
):
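    """Score one day's crawl of arXiv papers against `query` and write the digest to file.

    `query["interest"]` is a free-text description of the user's interests and
    `query["subjects"]` lists the arXiv subject names used to pre-filter papers.
    `date` defaults to today and must match the `%a, %d %b %y` format of the
    crawled JSONL files, e.g. "Wed, 10 May 23".
    """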
if date is None:
date = datetime.today().strftime('%a, %d %b %y')
# string format such as Wed, 10 May 23
print ("the date for the arxiv data is: ", date)
all_papers = [json.loads(l) for l in open(f"{data_dir}/{date}.jsonl", "r")]
print (f"We found {len(all_papers)}.")
all_papers_in_subjects = [
t for t in all_papers
if bool(set(process_subject_fields(t['subjects'])) & set(query['subjects']))
]
print(f"After filtering subjects, we have {len(all_papers_in_subjects)} papers left.")
    ans_data, hallucination = generate_relevance_score(all_papers_in_subjects, query, model_name, threshold_score, num_paper_in_prompt, temperature, top_p)
utils.write_ans_to_file(ans_data, date, output_dir="../outputs")
return ans_data
if __name__ == "__main__":
query = {"interest":"""
1. Large language model pretraining and finetunings
2. Multimodal machine learning
3. Do not care about specific application, for example, information extraction, summarization, etc.
4. Not interested in paper focus on specific languages, e.g., Arabic, Chinese, etc.\n""",
"subjects":["Computation and Language"]}
ans_data = run_all_day_paper(query)