Update frontend/src/pages/QuotePage/QuotePage.js
frontend/src/pages/QuotePage/QuotePage.js CHANGED
@@ -27,18 +27,18 @@ const citations = [
     }`,
     type: "main",
   },
-  {
-    title: "FinBen: A Holistic Financial Benchmark for Large Language Models",
-    authors:
-      "Qianqian Xie et al.",
-    citation: `@article{xie2024finben,
-      title={The finben: An holistic financial benchmark for large language models},
-      author={Xie, Qianqian and Han, Weiguang and Chen, Zhengyu and Xiang, Ruoyu and Zhang, Xiao and He, Yueru and Xiao, Mengxi and Li, Dong and Dai, Yongfu and Feng, Duanyu and others},
-      journal={arXiv preprint arXiv:2402.12659},
-      year={2024}
-    }`,
-    type: "main",
-  },
+  // {
+  //   title: "FinBen: A Holistic Financial Benchmark for Large Language Models",
+  //   authors:
+  //     "Qianqian Xie et al.",
+  //   citation: `@article{xie2024finben,
+  //     title={The finben: An holistic financial benchmark for large language models},
+  //     author={Xie, Qianqian and Han, Weiguang and Chen, Zhengyu and Xiang, Ruoyu and Zhang, Xiao and He, Yueru and Xiao, Mengxi and Li, Dong and Dai, Yongfu and Feng, Duanyu and others},
+  //     journal={arXiv preprint arXiv:2402.12659},
+  //     year={2024}
+  //   }`,
+  //   type: "main",
+  // },
   {
     title: "Evaluation Framework",
     authors: "Leo Gao et al.",
@@ -58,24 +58,51 @@
 
 const priorWork = [
   {
-    title: "PIXIU: a large language model, instruction data and evaluation benchmark for finance",
+    title: "The financial narrative summarisation shared task (FNS 2023)",
+    authors:
+      "Elias Zavitsanos et al.",
+    citation: `@inproceedings{zavitsanos2023financial,
+      title={The financial narrative summarisation shared task (FNS 2023)},
+      author={Zavitsanos, Elias and Kosmopoulos, Aris and Giannakopoulos, George and Litvak, Marina and Carbajo-Coronado, Blanca and Moreno-Sandoval, Antonio and El-Haj, Mo},
+      booktitle={2023 IEEE International Conference on Big Data (BigData)},
+      pages={2890--2896},
+      year={2023},
+      organization={IEEE}
+    }`,
+    type: "main",
+  },
+  {
+    title: "MultiFin: A dataset for multilingual financial NLP",
     authors:
-      "Qianqian Xie et al.",
-    citation: `@inproceedings{10.5555/3666122.3667576,
-      author = {Xie, Qianqian and Han, Weiguang and Zhang, Xiao and Lai, Yanzhao and Peng, Min and Lopez-Lira, Alejandro and Huang, Jimin},
-      title = {PIXIU: a large language model, instruction data and evaluation benchmark for finance},
-      year = {2024},
-      publisher = {Curran Associates Inc.},
-      address = {Red Hook, NY, USA},
-      abstract = {Although large language models (LLMs) have shown great performance in natural language processing (NLP) in the financial domain, there are no publicly available financially tailored LLMs, instruction tuning datasets, and evaluation benchmarks, which is critical for continually pushing forward the open-source development of financial artificial intelligence (AI). This paper introduces PIXIU, a comprehensive framework including the first financial LLM based on fine-tuning LLaMA with instruction data, the first instruction data with 128K data samples to support the fine-tuning, and an evaluation benchmark with 8 tasks and 15 datasets. We first construct the large-scale multi-task instruction data considering a variety of financial tasks, financial document types, and financial data modalities. We then propose a financial LLM called FinMA by fine-tuning LLaMA with the constructed dataset to be able to follow instructions for various financial tasks. To support the evaluation of financial LLMs, we propose a standardized benchmark that covers a set of critical financial tasks, including six financial NLP tasks and two financial prediction tasks. With this benchmark, we conduct a detailed analysis of FinMA and several existing LLMs, uncovering their strengths and weaknesses in handling critical financial tasks. The model, datasets, benchmark, and experimental results are open-sourced to facilitate future research in financial AI.},
-      booktitle = {Proceedings of the 37th International Conference on Neural Information Processing Systems},
-      articleno = {1454},
-      numpages = {16},
-      location = {New Orleans, LA, USA},
-      series = {NIPS '23}
+      "Rasmus J{\o}rgensen et al.",
+    citation: `@inproceedings{jorgensen2023multifin,
+      title={MultiFin: A dataset for multilingual financial NLP},
+      author={J{\o}rgensen, Rasmus and Brandt, Oliver and Hartmann, Mareike and Dai, Xiang and Igel, Christian and Elliott, Desmond},
+      booktitle={Findings of the Association for Computational Linguistics: EACL 2023},
+      pages={894--909},
+      year={2023}
     }`,
     type: "main",
   },
+  // {
+  //   title: "PIXIU: a large language model, instruction data and evaluation benchmark for finance",
+  //   authors:
+  //     "Qianqian Xie et al.",
+  //   citation: `@inproceedings{10.5555/3666122.3667576,
+  //     author = {Xie, Qianqian and Han, Weiguang and Zhang, Xiao and Lai, Yanzhao and Peng, Min and Lopez-Lira, Alejandro and Huang, Jimin},
+  //     title = {PIXIU: a large language model, instruction data and evaluation benchmark for finance},
+  //     year = {2024},
+  //     publisher = {Curran Associates Inc.},
+  //     address = {Red Hook, NY, USA},
+  //     abstract = {Although large language models (LLMs) have shown great performance in natural language processing (NLP) in the financial domain, there are no publicly available financially tailored LLMs, instruction tuning datasets, and evaluation benchmarks, which is critical for continually pushing forward the open-source development of financial artificial intelligence (AI). This paper introduces PIXIU, a comprehensive framework including the first financial LLM based on fine-tuning LLaMA with instruction data, the first instruction data with 128K data samples to support the fine-tuning, and an evaluation benchmark with 8 tasks and 15 datasets. We first construct the large-scale multi-task instruction data considering a variety of financial tasks, financial document types, and financial data modalities. We then propose a financial LLM called FinMA by fine-tuning LLaMA with the constructed dataset to be able to follow instructions for various financial tasks. To support the evaluation of financial LLMs, we propose a standardized benchmark that covers a set of critical financial tasks, including six financial NLP tasks and two financial prediction tasks. With this benchmark, we conduct a detailed analysis of FinMA and several existing LLMs, uncovering their strengths and weaknesses in handling critical financial tasks. The model, datasets, benchmark, and experimental results are open-sourced to facilitate future research in financial AI.},
+  //     booktitle = {Proceedings of the 37th International Conference on Neural Information Processing Systems},
+  //     articleno = {1454},
+  //     numpages = {16},
+  //     location = {New Orleans, LA, USA},
+  //     series = {NIPS '23}
+  //   }`,
+  //   type: "main",
+  // },
 ];
 
 const benchmarks = [
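For context, every entry in the `citations` and `priorWork` arrays shares the shape `{ title, authors, citation, type }`, where `citation` holds a raw BibTeX string in a template literal. One caveat with that encoding: in an untagged JavaScript template literal, `\o` is a non-escape character and evaluates to plain `o`, so `J{\o}rgensen` above is actually stored as `J{o}rgensen`; writing `J{\\o}rgensen` would preserve the backslash for BibTeX. Below is a minimal sketch of how such entries might be rendered; the actual rendering logic of QuotePage.js is not part of this diff, so the component and prop names here are illustrative only.

// Hypothetical sketch: renders each entry's title and authors, with the raw
// BibTeX string in a <pre> block for easy copy-paste. Assumes entries follow
// the { title, authors, citation, type } shape used in the arrays above.
import React from "react";

function CitationList({ entries }) {
  return (
    <ul>
      {entries
        .filter((entry) => entry.type === "main")
        .map((entry) => (
          <li key={entry.title}>
            <strong>{entry.title}</strong> by {entry.authors}
            <pre>{entry.citation}</pre>
          </li>
        ))}
    </ul>
  );
}

// Usage (hypothetical): <CitationList entries={priorWork} />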