Niklas Hoepner committed on
Commit e130a6a · 1 Parent(s): 21ab6f3

Implemented L3Score from the SPIQA dataset paper

Files changed (5)
  1. L3Score.py +255 -0
  2. README.md +145 -28
  3. app.py +1 -1
  4. l3score.py +0 -95
  5. requirements.txt +7 -1
L3Score.py ADDED
@@ -0,0 +1,255 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ L3Score metric to score the quality of a free-form answer given a question and a ground-truth answer.
+ The metric is based on the log-probability of the Yes/No token of an LLM judge.
+ The metric is from the paper: https://arxiv.org/pdf/2407.09413
+ """
+
+ import os
+
+ import evaluate
+ import datasets
+ import numpy as np
+
+ from langchain.chat_models.base import init_chat_model
+
+
+ _CITATION = """\
+ @article{pramanick2024spiqa,
+   title={SPIQA: A dataset for multimodal question answering on scientific papers},
+   author={Pramanick, Shraman and Chellappa, Rama and Venugopalan, Subhashini},
+   journal={arXiv preprint arXiv:2407.09413},
+   year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Implements the L3Score metric to score the quality of a free-form answer given a question and a ground-truth answer.
+ The metric is based on the log-probability of the Yes/No token of an LLM judge.
+ The metric is from the paper: https://arxiv.org/pdf/2407.09413
+ """
+
+
+ _KWARGS_DESCRIPTION = """
+ Implements the L3Score metric to score the quality of a free-form answer given a question and a ground-truth answer.
+ Args:
+     questions: list of questions to score. Each question should be a string.
+     predictions: list of predictions to score. Each prediction
+         should be a string.
+     references: list of references, one per prediction. Each
+         reference should be a string.
+ Returns:
+     L3Score: mean L3Score over all (question, prediction, reference) triplets.
+ Examples:
+     Example 1: High certainty that the prediction matches the ground truth.
+     >>> L3Score = evaluate.load("L3Score")
+     >>> L3Score.compute(questions=["What is the capital of France?"], predictions=["Paris"], references=["Paris"], api_key="your-openai-api-key", provider="openai", model="gpt-4o-mini")
+     {'L3Score': 0.99...}
+
+     Example 2: High certainty that the prediction does not match the ground truth.
+     >>> L3Score = evaluate.load("L3Score")
+     >>> L3Score.compute(questions=["What is the capital of Germany?"], predictions=["Moscow"], references=["Berlin"], api_key="your-openai-api-key", provider="openai", model="gpt-4o-mini")
+     {'L3Score': 0.00...}
+ """
+
+
+ # Providers whose chat APIs expose top-k token log-probabilities.
+ PROVIDER_WITH_TOP_LOGPROBS = ["openai", "deepseek", "xai"]
+
+ _PROMPT = (
+     "You are given a question, ground-truth answer, and a candidate answer. "
+     "Question: {question} \nGround-truth answer: {gt} \nCandidate answer: {answer} \n"
+     "Is the semantic meaning of the ground-truth and candidate answers similar? "
+     "Answer in one word - Yes or No."
+ )
+
+ _SUFFIXES_TO_SCORE = [" yes", " yeah"]
+ _COMPLEMENT_SUFFIXES = [" no"]
+
+ NEGATIVE_INF = -1000.0
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class L3Score(evaluate.Metric):
+     """
+     L3Score metric to score the quality of a free-form answer given a question and a ground-truth answer.
+     The metric is based on the log-probability of the Yes/No token of an LLM judge.
+     The metric is from the paper: https://arxiv.org/pdf/2407.09413
+     """
+
+     def _info(self):
+         return evaluate.MetricInfo(
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "questions": datasets.Value("string"),
+                     "predictions": datasets.Value("string"),
+                     "references": datasets.Value("string"),
+                 }
+             ),
+             homepage="https://github.com/google/spiqa",
+             codebase_urls=[
+                 "https://github.com/google/spiqa/blob/main/metrics/llmlogscore/llmlogscore.py"
+             ],
+             reference_urls=[
+                 "https://arxiv.org/pdf/2407.09413",
+                 "https://github.com/google/spiqa",
+                 "https://huggingface.co/datasets/google/spiqa",
+             ],
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources useful to compute the scores"""
+         pass
+
+     def _verify_input(self, provider, model, api_key):
+         """Verify the input parameters"""
+         if provider not in PROVIDER_WITH_TOP_LOGPROBS:
+             raise ValueError(
+                 "Provider must offer top_logprobs to use this metric, pick from {}".format(
+                     PROVIDER_WITH_TOP_LOGPROBS
+                 )
+             )
+
+         if api_key == "":
+             raise ValueError("api_key is required")
+
+     def _get_llm(self, model, api_key):
+         """Initialize the judge LLM and request the top-5 token log-probabilities."""
+         llm = init_chat_model(model=model, api_key=api_key)
+         llm = llm.bind(logprobs=True, top_logprobs=5)
+         return llm
+
+     def _compute(
+         self,
+         questions,
+         predictions,
+         references,
+         api_key="",
+         provider="openai",
+         model="gpt-4o-mini",
+     ):
+         """Returns the scores"""
+
+         # Check that the provider exposes top_logprobs and that an API key was given
+         self._verify_input(provider, model, api_key)
+
+         # Initialize the LLM judge
+         llm = self._get_llm(model, api_key)
+
+         total_score = 0.0
+         count = 0
+         for question, prediction, reference in zip(questions, predictions, references):
+             response = llm.invoke(
+                 [
+                     (
+                         "human",
+                         _PROMPT.format(question=question, gt=reference, answer=prediction),
+                     )
+                 ]
+             )
+             score = self._calculate_L3Score(
+                 response.response_metadata["logprobs"]["content"][0]["top_logprobs"]
+             )
+             total_score += float(score)
+             count += 1
+
+         if count > 0:
+             total_score = total_score / count
+
+         return {
+             "L3Score": total_score,
+         }
+
+     def _calculate_L3Score(self, top_logprobs):
+         """
+         Calculate the L3Score from the top log-probabilities of the judge's first token.
+         """
+
+         normalized_suffixes = [self._normalize(suffix) for suffix in _SUFFIXES_TO_SCORE]
+         normalized_complement_suffixes = [
+             self._normalize(complement_suffix)
+             for complement_suffix in _COMPLEMENT_SUFFIXES
+         ]
+
+         suffix_logprob = NEGATIVE_INF
+         complement_logprob = NEGATIVE_INF
+         suffix_index = -1
+         complement_suffix_index = -1
+
+         # Look for a "yes"-like token among the top log-probabilities
+         for i, token_logprob in enumerate(top_logprobs):
+             if self._normalize(token_logprob["token"]) in normalized_suffixes:
+                 suffix_logprob = token_logprob["logprob"]
+                 suffix_index = i
+                 break
+
+         # Look for a "no"-like token among the top log-probabilities
+         for i, token_logprob in enumerate(top_logprobs):
+             if (
+                 self._normalize(token_logprob["token"])
+                 in normalized_complement_suffixes
+             ):
+                 complement_suffix_index = i
+                 complement_logprob = token_logprob["logprob"]
+                 break
+
+         # Case 1: neither Yes nor No appears in the top tokens
+         if suffix_index == -1 and complement_suffix_index == -1:
+             return 0.0
+
+         # Case 2: both Yes and No are present
+         if suffix_index != -1 and complement_suffix_index != -1:
+             return self._renormalize_score(
+                 yes_score=suffix_logprob, no_score=complement_logprob
+             )
+
+         # Case 3: only one of the two is present; bound the missing token's probability
+         # by the smaller of the remaining probability mass and the least likely top token
+         lowest_logprob = top_logprobs[-1]["logprob"]
+         lowest_token_prob = np.exp(lowest_logprob)
+         sum_probs = sum(
+             np.exp(token_logprob["logprob"]) for token_logprob in top_logprobs
+         )
+         remaining_prob = 1 - sum_probs
+         min_prob = min(lowest_token_prob, remaining_prob)
+         if min_prob < 1e-8:
+             min_prob = 1e-8
+         reciprocal_logprob = np.log(min_prob)
+
+         if suffix_index != -1:
+             exclude_score = suffix_logprob
+             include_score = reciprocal_logprob
+         else:
+             exclude_score = reciprocal_logprob
+             include_score = complement_logprob
+
+         return self._renormalize_score(yes_score=exclude_score, no_score=include_score)
+
+     def _renormalize_score(self, yes_score: float, no_score: float) -> float:
+         """Renormalize the scores to be between 0 and 1."""
+         return 1 / (1 + np.exp(-(yes_score - no_score)))
+
+     def _normalize(self, text: str) -> str:
+         """Remove white space and lower case for normalized comparisons."""
+         return text.strip().lower()
+
+
+ if __name__ == "__main__":
+
+     questions = ["What is the capital of France?", "What is the capital of Germany?"]
+     predictions = ["Paris", "Moscow"]
+     references = ["Paris", "Berlin"]
+
+     L3Score_test = L3Score()
+
+     # Uses the OpenAI key from the environment, so provider and model must match it
+     results = L3Score_test.compute(
+         questions=questions,
+         predictions=predictions,
+         references=references,
+         api_key=os.environ["OPENAI_API_KEY"],
+         provider="openai",
+         model="gpt-4o-mini",
+     )
+     print(results)
README.md CHANGED
@@ -1,50 +1,167 @@
  ---
- title: l3score
  datasets:
- -
  tags:
- - evaluate
- - metric
- description: "TODO: add a description here"
  sdk: gradio
  sdk_version: 3.19.1
  app_file: app.py
  pinned: false
  ---

- # Metric Card for l3score

- ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*

- ## Metric Description
- *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*

- ## How to Use
- *Give general statement of how to use the metric*

- *Provide simplest possible example for using the metric*

- ### Inputs
- *List all input arguments in the format below*
- - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*

- ### Output Values

- *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}*

- *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*

- #### Values from Popular Papers
- *Give examples, preferrably with links to leaderboards or publications, to papers that have reported this metric, along with the values they have reported.*

- ### Examples
- *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*

- ## Limitations and Bias
- *Note any known limitations or biases that the metric has, with links and references if possible.*

- ## Citation
- *Cite the source where this metric was introduced.*

- ## Further References
- *Add any useful further references.*
  ---
+ title: L3Score
  datasets:
+ - google/spiqa
  tags:
+ - evaluate
+ - metric
+ - semantic-similarity
+ - qa
+ - llm-eval
+ description: >
+   L3Score is a metric for evaluating the semantic similarity of free-form answers in question answering tasks.
+   It uses log-probabilities of "Yes"/"No" tokens from a language model acting as a judge.
+   Based on the SPIQA benchmark: https://arxiv.org/pdf/2407.09413
  sdk: gradio
  sdk_version: 3.19.1
  app_file: app.py
  pinned: false
  ---

+ # 🦢 Metric Card: L3Score

+ ## 📌 Description

+ **L3Score** evaluates how semantically close a model-generated answer is to a reference answer for a given question. It prompts a **language model as a judge** using the following format:

+ ```text
+ You are given a question, ground-truth answer, and a candidate answer.

+ Question: {question}
+ Ground-truth answer: {gt}
+ Candidate answer: {answer}

+ Is the semantic meaning of the ground-truth and candidate answers similar?
+ Answer in one word - Yes or No.
+ ```

+ The model's **log-probabilities** for "Yes" and "No" tokens are used to compute the score.

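+ For illustration, here is a minimal sketch of the underlying judge call (it mirrors `L3Score.py` in this repository; the model name and the `OPENAI_API_KEY` environment variable are just example choices):
+
+ ```python
+ import os
+ from langchain.chat_models import init_chat_model
+
+ # Judge LLM that returns the top-5 token log-probabilities with each response
+ judge = init_chat_model(model="gpt-4o-mini", api_key=os.environ["OPENAI_API_KEY"])
+ judge = judge.bind(logprobs=True, top_logprobs=5)
+
+ prompt = (
+     "You are given a question, ground-truth answer, and a candidate answer. "
+     "Question: What is the capital of France? \nGround-truth answer: Paris \n"
+     "Candidate answer: Paris \n"
+     "Is the semantic meaning of the ground-truth and candidate answers similar? "
+     "Answer in one word - Yes or No."
+ )
+ response = judge.invoke([("human", prompt)])
+
+ # Top-5 alternatives for the first generated token, e.g.
+ # [{'token': 'Yes', 'logprob': -0.01, ...}, {'token': 'No', 'logprob': -4.9, ...}, ...]
+ top_logprobs = response.response_metadata["logprobs"]["content"][0]["top_logprobs"]
+ ```
+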
+ ### 🧮 Scoring Logic

+ Let $l_{\text{yes}}$ and $l_{\text{no}}$ be the log-probabilities of "Yes" and "No", respectively.

+ If neither token is in the top-5:

+ $$
+ \text{L3Score} = 0
+ $$
+
+ If both are present:
+
+ $$
+ \text{L3Score} = \frac{\exp(l_{\text{yes}})}{\exp(l_{\text{yes}}) + \exp(l_{\text{no}})}
+ $$
+
+ If only one is present, the probability of the missing token is estimated as the smaller of the remaining probability mass and the probability of the least likely token in the top-5, and the same formula is applied.
+ See the [SPIQA paper](https://arxiv.org/pdf/2407.09413) for details.
+
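+ As a rough sketch of this logic (simplified token matching; not the packaged implementation):
+
+ ```python
+ import numpy as np
+
+ def l3score_from_top_logprobs(top_logprobs, floor=1e-8):
+     """top_logprobs: top-5 [{'token', 'logprob'}, ...] of the judge's first token, most likely first."""
+     logprobs = {d["token"].strip().lower(): d["logprob"] for d in top_logprobs}
+     l_yes, l_no = logprobs.get("yes"), logprobs.get("no")
+
+     if l_yes is None and l_no is None:
+         return 0.0  # neither "Yes" nor "No" in the top-5
+
+     if l_yes is None or l_no is None:
+         # Bound the missing token's probability by the smaller of the remaining mass
+         # and the least likely returned token, then fall through to the same formula.
+         mass = sum(np.exp(d["logprob"]) for d in top_logprobs)
+         bound = np.log(max(min(1.0 - mass, np.exp(top_logprobs[-1]["logprob"])), floor))
+         l_yes = bound if l_yes is None else l_yes
+         l_no = bound if l_no is None else l_no
+
+     # exp(l_yes) / (exp(l_yes) + exp(l_no))  ==  sigmoid(l_yes - l_no)
+     return float(1.0 / (1.0 + np.exp(-(l_yes - l_no))))
+
+ # Both tokens present: l_yes = -0.02, l_no = -4.0  ->  sigmoid(3.98) ≈ 0.98
+ print(l3score_from_top_logprobs([{"token": "Yes", "logprob": -0.02}, {"token": "No", "logprob": -4.0}]))
+ ```
+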
+ ---
+
+ ## 🚀 How to Use
+
+ ```python
+ import evaluate
+
+ l3score = evaluate.load("nhop/L3Score")
+
+ questions = ["What is the capital of France?", "What is the capital of Germany?"]
+ predictions = ["Paris", "Moscow"]
+ references = ["Paris", "Berlin"]
+
+ score = l3score.compute(
+     questions=questions,
+     predictions=predictions,
+     references=references,
+     api_key="your-openai-api-key",
+     provider="openai",
+     model="gpt-4o-mini"
+ )
+
+ print(score)
+ # {'L3Score': 0.49...}
+ ```
+
+ ---
+
+ ### 🔠 Inputs
+
+ | Name          | Type        | Description                                                                                |
+ |---------------|-------------|--------------------------------------------------------------------------------------------|
+ | `questions`   | `list[str]` | The list of input questions.                                                                 |
+ | `predictions` | `list[str]` | Generated answers by the model being evaluated.                                              |
+ | `references`  | `list[str]` | Ground-truth or reference answers.                                                           |
+ | `api_key`     | `str`       | API key for the selected LLM provider.                                                       |
+ | `provider`    | `str`       | Must support top-n token log-probabilities (currently: `"openai"`, `"deepseek"`, `"xai"`).   |
+ | `model`       | `str`       | Name of the evaluation LLM (e.g., `"gpt-4o-mini"`).                                          |
+
+ ---
+
+ ### 📄 Output
+
+ A dictionary with a single key:
+
+ ```python
+ {"L3Score": float}
+ ```
+
+ The value is the **average score** over all (question, prediction, reference) triplets.
+
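+ For example (sketch), the two calls in the Examples section below score roughly 0.99 and 0.00, so passed together as one batch they return their mean:
+
+ ```python
+ per_triplet = [0.99, 0.00]                                  # illustrative per-triplet scores
+ print({"L3Score": sum(per_triplet) / len(per_triplet)})     # {'L3Score': 0.495}
+ ```
+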
+ ---
+
+ ## 💡 Examples
+
+ ```python
+ l3score = evaluate.load("nhop/L3Score")
+
+ score = l3score.compute(
+     questions=["What is the capital of France?"],
+     predictions=["Paris"],
+     references=["Paris"],
+     api_key="your-openai-api-key",
+     provider="openai",
+     model="gpt-4o-mini"
+ )
+ # {'L3Score': 0.99...}
+
+ score = l3score.compute(
+     questions=["What is the capital of Germany?"],
+     predictions=["Moscow"],
+     references=["Berlin"],
+     api_key="your-openai-api-key",
+     provider="openai",
+     model="gpt-4o-mini"
+ )
+ # {'L3Score': 0.00...}
+ ```
+
+ ---
+
+ ## ⚠️ Limitations and Bias
+
+ - Requires models that expose **top-n token log-probabilities** (currently: OpenAI, DeepSeek, xAI).
+ - Scores are **only comparable when using the same judge model**.
+
+ ---
+
+ ## 📖 Citation
+
+ ```bibtex
+ @article{pramanick2024spiqa,
+   title={SPIQA: A Dataset for Multimodal Question Answering on Scientific Papers},
+   author={Pramanick, Shraman and Chellappa, Rama and Venugopalan, Subhashini},
+   journal={arXiv preprint arXiv:2407.09413},
+   year={2024}
+ }
+ ```
+
+ ---
+
+ ## 🔗 Further References
+
+ - 🤗 [Dataset on Hugging Face](https://huggingface.co/datasets/google/spiqa)
+ - 🐙 [GitHub Repository](https://github.com/google/spiqa)
+ - 📄 [SPIQA Paper (arXiv:2407.09413)](https://arxiv.org/pdf/2407.09413)
app.py CHANGED
@@ -2,5 +2,5 @@ import evaluate
  from evaluate.utils import launch_gradio_widget


- module = evaluate.load("nhop/l3score")
  launch_gradio_widget(module)

  from evaluate.utils import launch_gradio_widget


+ module = evaluate.load("nhop/L3Score")
  launch_gradio_widget(module)
l3score.py DELETED
@@ -1,95 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: Add a description here."""
-
- import evaluate
- import datasets
-
-
- # TODO: Add BibTeX citation
- _CITATION = """\
- @InProceedings{huggingface:module,
- title = {A great new module},
- authors={huggingface, Inc.},
- year={2020}
- }
- """
-
- # TODO: Add description of the module here
- _DESCRIPTION = """\
- This new module is designed to solve this great ML task and is crafted with a lot of care.
- """
-
-
- # TODO: Add description of the arguments of the module here
- _KWARGS_DESCRIPTION = """
- Calculates how good are predictions given some references, using certain scores
- Args:
-     predictions: list of predictions to score. Each predictions
-         should be a string with tokens separated by spaces.
-     references: list of reference for each prediction. Each
-         reference should be a string with tokens separated by spaces.
- Returns:
-     accuracy: description of the first score,
-     another_score: description of the second score,
- Examples:
-     Examples should be written in doctest format, and should illustrate how
-     to use the function.
-
-     >>> my_new_module = evaluate.load("my_new_module")
-     >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-     >>> print(results)
-     {'accuracy': 1.0}
- """
-
- # TODO: Define external resources urls if needed
- BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-
-
- @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
- class l3score(evaluate.Metric):
-     """TODO: Short description of my evaluation module."""
-
-     def _info(self):
-         # TODO: Specifies the evaluate.EvaluationModuleInfo object
-         return evaluate.MetricInfo(
-             # This is the description that will appear on the modules page.
-             module_type="metric",
-             description=_DESCRIPTION,
-             citation=_CITATION,
-             inputs_description=_KWARGS_DESCRIPTION,
-             # This defines the format of each prediction and reference
-             features=datasets.Features({
-                 'predictions': datasets.Value('int64'),
-                 'references': datasets.Value('int64'),
-             }),
-             # Homepage of the module for documentation
-             homepage="http://module.homepage",
-             # Additional links to the codebase or references
-             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-             reference_urls=["http://path.to.reference.url/new_module"]
-         )
-
-     def _download_and_prepare(self, dl_manager):
-         """Optional: download external resources useful to compute the scores"""
-         # TODO: Download external resources if needed
-         pass
-
-     def _compute(self, predictions, references):
-         """Returns the scores"""
-         # TODO: Compute the different scores of the module
-         accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
-         return {
-             "accuracy": accuracy,
-         }
requirements.txt CHANGED
@@ -1 +1,7 @@
- git+https://github.com/huggingface/evaluate@main

+ git+https://github.com/huggingface/evaluate@main
+ langchain==0.3.23
+ langchain-deepseek==0.1.3
+ langchain-openai==0.3.12
+ langchain-community==0.3.21
+ langchain-core==0.3.52
+ numpy==2.2.4