Remove commented-out code from previous HF model creation attempt
app.py
CHANGED
@@ -9,33 +9,12 @@ import re
 
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
-# if torch.cuda.is_available():
-#     device = "cuda:0"
-# else:
-#     device = "cpu"
 
-# Set up client to call inference
 client=openai.OpenAI(
     base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
     api_key=LEPTON_API_TOKEN
 )
 
-# Create own model
-# tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
-# model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", torch_dtype=torch.float16, device_map="auto")
-# model.gradient_checkpointing_enable()
-
-# def load_model_and_tokenizer(model_choice):
-#     if model_choice == "Patronus Lynx 8B":
-#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
-#     else:
-#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-70B-Instruct"
-
-#     tokenizer = AutoTokenizer.from_pretrained(model_name)
-#     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto").to(device)
-#     model.gradient_checkpointing_enable()
-#     return tokenizer, model
-
 PROMPT = """
 Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
 
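The removed block above loaded Patronus Lynx locally via transformers and picked a CUDA/CPU device by hand; after this commit all inference goes through the OpenAI-compatible Lepton endpoint kept in the context lines. A minimal sketch of that hosted call pattern, assuming the deployment serves the plain completions API (the model name and sampling parameters below are illustrative guesses, not taken from this file):

import os
import openai

# OpenAI-compatible client pointed at the hosted Lynx deployment
client = openai.OpenAI(
    base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
    api_key=os.environ.get("LEPTON_API_TOKEN"),
)

# Plain-text completion against the hosted model; "lynx-70b" is a
# hypothetical deployment name and the sampling values are assumptions
response = client.completions.create(
    model="lynx-70b",
    prompt="...prompt built with PROMPT.format(...)...",
    temperature=0.0,
    max_tokens=600,
)
print(response.choices[0].text)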
@@ -95,10 +74,7 @@ def clean_json_string(json_str):
 
     return json_str
 
-# @spaces.GPU()
-# def model_call(question, document, answer, tokenizer, model):
 def model_call(question, document, answer):
-    # device = next(model.parameters()).device
     NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
     print("ENTIRE NEW_FORMAT", NEW_FORMAT)
     response = client.completions.create(
@@ -112,29 +88,8 @@ def model_call(question, document, answer):
     print("type of GENERATED TEXT", type(generated_text))
     reasoning = generated_text["REASONING"][0]
     score = generated_text["SCORE"]
-    # inputs = tokenizer(NEW_FORMAT, return_tensors="pt")
-    # print("INPUTS", inputs)
-    # input_ids = inputs.input_ids
-    # attention_mask = inputs.attention_mask
-    # generate_kwargs = dict(
-    #     input_ids=input_ids,
-    #     do_sample=True,
-    #     attention_mask=attention_mask,
-    #     pad_token_id=tokenizer.eos_token_id,
-    # )
-    # print("GENERATE_KWARGS", generate_kwargs)
-    # with torch.no_grad():
-    #     outputs = model.generate(**generate_kwargs)
-    # print("OUTPUTS", outputs)
-    # generated_text = tokenizer.decode(outputs[0])
-    # print(generated_text)
     return reasoning, score
 
-# def update_model(model_choice, tokenizer_state, model_state):
-#     new_tokenizer, new_model = load_model_and_tokenizer(model_choice)
-#     print("UPDATED MODEL", new_tokenizer, new_model)
-#     return new_tokenizer, new_model
-
 inputs = [
     gr.Textbox(label="Question"),
     gr.Textbox(label="Document"),
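The kept lines in model_call index the parsed output as generated_text["REASONING"][0] and generated_text["SCORE"]; clean_json_string is defined earlier in app.py, but its body sits outside this diff. A hedged sketch of the parsing step those lines imply, with a stand-in helper rather than the file's actual implementation:

import json

def parse_verdict(raw_text: str):
    # Assumes the model emits a JSON object shaped like
    # {"REASONING": ["..."], "SCORE": "PASS"} — the same shape
    # the kept lines index into.
    generated_text = json.loads(raw_text)
    reasoning = generated_text["REASONING"][0]
    score = generated_text["SCORE"]
    return reasoning, score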
@@ -145,14 +100,10 @@ outputs = [
     gr.Textbox(label="Score")
 ]
 
-# submit_button = gr.Button("Submit")
-
 with gr.Blocks() as demo:
     gr.Markdown(HEADER)
     # gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
 
-    # tokenizer_state = gr.State()
-    # model_state = gr.State()
     with gr.Column(scale=1):
         question = gr.Textbox(label="Question")
         document = gr.Textbox(label="Document")
@@ -167,6 +118,4 @@ with gr.Blocks() as demo:
 
     submit_button.click(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])
 
-    # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
-    # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])
 demo.launch()
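After the cleanup, the surviving UI reduces to a small Gradio Blocks app: three textboxes in, reasoning and score out, with model_call bound to a submit button. A self-contained sketch of that shape (the handler below is a stand-in that returns a fixed verdict instead of calling the endpoint):

import gradio as gr

def model_call(question, document, answer):
    # Stand-in for the real endpoint call
    return f"Checked answer against document for: {question}", "PASS"

with gr.Blocks() as demo:
    with gr.Column(scale=1):
        question = gr.Textbox(label="Question")
        document = gr.Textbox(label="Document")
        answer = gr.Textbox(label="Answer")
        submit_button = gr.Button("Submit")
        reasoning = gr.Textbox(label="Reasoning")
        score = gr.Textbox(label="Score")

    submit_button.click(
        fn=model_call,
        inputs=[question, document, answer],
        outputs=[reasoning, score],
    )

demo.launch()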