Allen Park committed on
Commit
a1f7b63
·
1 Parent(s): 7be224f

add client initialization and dropdown changing model for client

Browse files
Files changed (1) hide show
  1. app.py +32 -19
app.py CHANGED
@@ -11,10 +11,10 @@ from typing import List, Optional, Tuple, Union
11
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
12
  LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
13
 
14
- client=openai.OpenAI(
15
- base_url="https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/",
16
- api_key=LEPTON_API_TOKEN
17
- )
18
  # client=openai.OpenAI(
19
  # base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
20
  # api_key=LEPTON_API_TOKEN
@@ -71,6 +71,18 @@ HEADER = """
71
  **Getting Started**: Provide a question and document or context given to your model in addition to the answer given by the model and then click submit. The output panel will indicate whether the response is a hallucination (Fail) or if it is faithful to the given document or context (Pass) through the score Pass or Fail and provide reasoning behind the score.
72
  """
73
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  def parse_patronus_lynx_response(
75
  response: str,
76
  ) -> Tuple[bool, Union[List[str], None]]:
@@ -97,7 +109,7 @@ def parse_patronus_lynx_response(
97
 
98
  return hallucination, reasoning
99
 
100
- def model_call(question, document, answer):
101
  if question == "" or document == "" or answer == "":
102
  return "", ""
103
  NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
@@ -112,17 +124,18 @@ def model_call(question, document, answer):
112
  combined_reasoning = " ".join(reasoning)[1:-1]
113
  return combined_reasoning, score
114
 
115
- inputs = [
116
- gr.Textbox(label="Question"),
117
- gr.Textbox(label="Document"),
118
- gr.Textbox(label="Answer")
119
- ]
120
- outputs = [
121
- gr.Textbox(label="Reasoning"),
122
- gr.Textbox(label="Score")
123
- ]
124
 
125
  with gr.Blocks() as demo:
 
126
  gr.Markdown(HEADER)
127
  # gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
128
  model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model", interactive=True)
@@ -138,10 +151,10 @@ with gr.Blocks() as demo:
138
  reasoning = gr.Textbox(label="Reasoning")
139
  score = gr.Textbox(label="Score (FAIL if Hallucinated, PASS if not)")
140
 
141
- # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
142
 
143
- submit_button.click(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])
144
- question.submit(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])
145
- document.submit(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])
146
- answer.submit(fn=model_call, inputs=[question, document, answer], outputs=[reasoning, score])
147
  demo.launch()
 
11
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
12
  LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
13
 
14
+ # client=openai.OpenAI(
15
+ # base_url="https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/",
16
+ # api_key=LEPTON_API_TOKEN
17
+ # )
18
  # client=openai.OpenAI(
19
  # base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
20
  # api_key=LEPTON_API_TOKEN
 
71
  **Getting Started**: Provide a question and document or context given to your model in addition to the answer given by the model and then click submit. The output panel will indicate whether the response is a hallucination (Fail) or if it is faithful to the given document or context (Pass) through the score Pass or Fail and provide reasoning behind the score.
72
  """
73
 
74
def update_client(model_name):
    """Build an OpenAI-compatible client for the Lepton endpoint serving *model_name*.

    Args:
        model_name: Display name of the model, one of ``"Patronus Lynx 8B"``
            or ``"Patronus Lynx 70B"`` (the dropdown choices).

    Returns:
        An ``openai.OpenAI`` client whose ``base_url`` points at the matching
        Lepton deployment, authenticated with the module-level
        ``LEPTON_API_TOKEN``.

    Raises:
        ValueError: If *model_name* is not a known model. (The previous
            ``if``/``elif`` chain fell through and silently returned ``None``,
            which only failed later with an opaque ``AttributeError`` when the
            client was used.)
    """
    # Map display names to their Lepton deployment base URLs.
    base_urls = {
        "Patronus Lynx 8B": "https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/",
        "Patronus Lynx 70B": "https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
    }
    if model_name not in base_urls:
        raise ValueError(f"Unknown model: {model_name!r}")
    return openai.OpenAI(
        base_url=base_urls[model_name],
        api_key=LEPTON_API_TOKEN,
    )
86
  def parse_patronus_lynx_response(
87
  response: str,
88
  ) -> Tuple[bool, Union[List[str], None]]:
 
109
 
110
  return hallucination, reasoning
111
 
112
+ def model_call(question, document, answer, client):
113
  if question == "" or document == "" or answer == "":
114
  return "", ""
115
  NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
 
124
  combined_reasoning = " ".join(reasoning)[1:-1]
125
  return combined_reasoning, score
126
 
127
+ # inputs = [
128
+ # gr.Textbox(label="Question"),
129
+ # gr.Textbox(label="Document"),
130
+ # gr.Textbox(label="Answer")
131
+ # ]
132
+ # outputs = [
133
+ # gr.Textbox(label="Reasoning"),
134
+ # gr.Textbox(label="Score")
135
+ # ]
136
 
137
  with gr.Blocks() as demo:
138
+ client_state = gr.State(update_client("Patronus Lynx 8B"))
139
  gr.Markdown(HEADER)
140
  # gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
141
  model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model", interactive=True)
 
151
  reasoning = gr.Textbox(label="Reasoning")
152
  score = gr.Textbox(label="Score (FAIL if Hallucinated, PASS if not)")
153
 
154
+ model_dropdown.change(fn=update_client, inputs=[model_dropdown], outputs=[client_state])
155
 
156
+ submit_button.click(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
157
+ question.submit(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
158
+ document.submit(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
159
+ answer.submit(fn=model_call, inputs=[question, document, answer, client_state], outputs=[reasoning, score])
160
  demo.launch()