import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import spaces
HF_TOKEN = os.environ.get("HF_TOKEN", None)
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
# The checkpoint is a gated Llama-3 derivative, so pass the token read above.
tokenizer = AutoTokenizer.from_pretrained(
    "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", token=HF_TOKEN
)
# device_map="auto" already dispatches the weights; calling .to(device) on top
# of it errors out in recent accelerate versions, so the model is left where
# the dispatcher placed it.
model = AutoModelForCausalLM.from_pretrained(
    "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct",
    torch_dtype=torch.float16,
    device_map="auto",
    token=HF_TOKEN,
)
PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}
--
DOCUMENT:
{document}
--
ANSWER:
{answer}
--
Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""
HEADER = """
# Patronus Lynx Demo
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:50px;">
<td style="text-align: center;">
<a href="https://www.patronus.ai">
<img src="https://cdn.prod.website-files.com/64e655d42d3be60f582d0472/64ede352897bcddbe2d41207_patronusai_final_logo.svg" width="200" height="40" />
</a>
</td>
</tr>
</table>
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:30px;">
<td style="text-align: center;">
<a href="https://huggingface.co/PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Model_Card-Huggingface-orange" height="20"></a>
</td>
<td style="text-align: center;">
<a href="https://github.com/patronus-ai/Lynx-hallucination-detection"><img src="https://postimage.me/images/2024/03/04/GitHub_Logo_White.png" width="100" height="20"></a>
</td>
<td style="text-align: center; color: white;">
<a href="https://arxiv.org/abs/2407.08488"><img src="https://img.shields.io/badge/arXiv-2407.08488-b31b1b.svg" height="20"></a>
</td>
</tr>
</table>
**Patronus Lynx** is a state-of-the-art open-source model for hallucination detection.
"""
@spaces.GPU()
def model_call(question, document, answer):
    device = next(model.parameters()).device
    NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
    inputs = tokenizer(NEW_FORMAT, return_tensors="pt").to(device)
    input_ids = inputs.input_ids
    attention_mask = inputs.attention_mask
    generate_kwargs = dict(
        input_ids=input_ids,
        attention_mask=attention_mask,
        do_sample=True,
        max_new_tokens=600,  # leave room for the bullet-point reasoning plus the verdict
        pad_token_id=tokenizer.eos_token_id,
    )
    with torch.no_grad():
        outputs = model.generate(**generate_kwargs)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    generated_text = tokenizer.decode(
        outputs[0][input_ids.shape[-1]:], skip_special_tokens=True
    )
    print(generated_text)
    return generated_text
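
# A minimal sketch, not part of the original Space, for pulling the verdict out
# of the model's free-form output. It assumes the model followed the JSON format
# requested in PROMPT; `parse_verdict` is a hypothetical helper and is not wired
# into the Gradio app below.
import json
import re

def parse_verdict(generated_text):
    # Grab the {...} block in the output and read its "SCORE" key.
    match = re.search(r"\{.*\}", generated_text, re.DOTALL)
    if match is None:
        return None
    try:
        return json.loads(match.group(0)).get("SCORE")
    except json.JSONDecodeError:
        return None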
inputs = [
    gr.Textbox(label="Question"),
    gr.Textbox(label="Document"),
    gr.Textbox(label="Answer"),
]
with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    gr.Interface(fn=model_call, inputs=inputs, outputs="text")

demo.launch()