# Agent-Papers/src/about.py
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display on the leaderboard
    # (see the usage sketch below this block)
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")


NUM_FEWSHOT = 0  # Change this to match your few-shot setting
# ---------------------------------------------------
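

# Illustrative sketch (not part of the original file): one way the Tasks enum above
# is typically consumed when building the leaderboard table, with each Task's
# col_name becoming a display column. The helper name and return type are
# assumptions for illustration only.
def _task_display_columns() -> list[str]:
    """Return the display column names declared in the Tasks enum."""
    return [task.value.col_name for task in Tasks]
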
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">LLM Agent Papers</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
# Large Language Model Agent: A Survey on Methodology, Applications and Challenges
The era of intelligent agents is upon us, driven by revolutionary advancements in large language models.
Large Language Model (LLM) agents, with goal-driven behaviors and dynamic adaptation capabilities, potentially
represent a critical pathway toward artificial general intelligence.
This application showcases papers from our comprehensive survey of LLM agents.
We organize the papers into key categories: agent construction, collaboration mechanisms, evolution,
tools, security, benchmarks, and applications.
"""
# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## Survey Overview
This survey systematically deconstructs LLM agent systems through a methodology-centered taxonomy,
linking architectural foundations, collaboration mechanisms, and evolutionary pathways.
We unify fragmented research threads by revealing fundamental connections between agent design
principles and their emergent behaviors in complex environments.
Our work provides a unified architectural perspective, examining how agents are constructed,
how they collaborate, and how they evolve over time, while also addressing evaluation methodologies,
tool applications, practical challenges, and diverse application domains.
### Paper Categories
Our collection organizes papers into several key categories:
- **Introduction**: Survey papers and foundational works introducing LLM agents
- **Construction**: Papers on building and designing agents
- **Collaboration**: Multi-agent systems and communication methods
- **Evolution**: Learning and improvement of agents over time
- **Tools**: Integration of external tools with LLM agents
- **Security**: Safety, alignment, and ethical considerations
- **Datasets & Benchmarks**: Evaluation frameworks and resources
- **Applications**: Domain-specific uses in science, medicine, etc.
View the full paper on [arXiv](https://arxiv.org/abs/2503.21460) and explore our GitHub repository at
[https://github.com/luo-junyu/Awesome-Agent-Papers](https://github.com/luo-junyu/Awesome-Agent-Papers).
"""
EVALUATION_QUEUE_TEXT = """
## How to Contribute
If you have a paper that you believe should be included in our collection:
1. Check if the paper is already in our database
2. Submit your paper at [https://forms.office.com/r/sW0Zzymi5b](https://forms.office.com/r/sW0Zzymi5b) or email us at [email protected]
3. Include the paper's title, authors, abstract, URL, publication venue, and year
4. Suggest a section/category for the paper
We regularly update the repository and this application with new submissions.
"""
CITATION_BUTTON_LABEL = "Cite our survey paper"
CITATION_BUTTON_TEXT = r"""
@article{agentsurvey2025,
  title={Large Language Model Agent: A Survey on Methodology, Applications and Challenges},
  author={Junyu Luo and Weizhi Zhang and Ye Yuan and Yusheng Zhao and Junwei Yang and Yiyang Gu and Bohan Wu and Binqi Chen and Ziyue Qiao and Qingqing Long and Rongcheng Tu and Xiao Luo and Wei Ju and Zhiping Xiao and Yifan Wang and Meng Xiao and Chenwu Liu and Jingyang Yuan and Shichang Zhang and Yiqiao Jin and Fan Zhang and Xian Wu and Hanqing Zhao and Dacheng Tao and Philip S. Yu and Ming Zhang},
  journal={arXiv preprint arXiv:2503.21460},
  year={2025}
}
"""