DataCanvasAILab committed
Commit a43119f · verified · 1 Parent(s): ce00007

Upload folder using huggingface_hub

Files changed (3)
  1. dataset.py +47 -47
  2. dataset_info.json +16 -0
  3. titan_agent_benchmark_v3.json +0 -0
dataset.py CHANGED
@@ -1,47 +1,47 @@
- import json
- import datasets
-
- _DESCRIPTION = """Titan CV Agent Benchmark: evaluates an agent's reasoning and decision-making ability on complex visual tasks. Contains image and video samples together with paired question-answering tasks."""
- _HOMEPAGE = "https://huggingface.co/datasets/DataCanvasAILab/Titan-CV-Agent-Benchmark"
- _LICENSE = "MIT"
- _DATA_URL = "https://huggingface.co/datasets/DataCanvasAILab/Titan-CV-Agent-Benchmark/resolve/main/titan_agent_benchmark.json"
-
- class TitanCVAgentBenchmark(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             features=datasets.Features({
-                 "id": datasets.Value("string"),
-                 "media_path": datasets.Value("string"),
-                 "media_type": datasets.ClassLabel(names=["image", "video"]),
-                 "query": datasets.Value("string"),
-                 "answer": datasets.Value("string"),
-                 "note": datasets.Value("string"),
-             }),
-         )
-
-     def _split_generators(self, dl_manager):
-         data_path = dl_manager.download(_DATA_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": data_path},
-             )
-         ]
-
-     def _generate_examples(self, filepath):
-         with open(filepath, encoding="utf-8") as f:
-             data = json.load(f)
-         for idx, row in enumerate(data):
-             yield idx, {
-                 "id": row["id"],
-                 "media_path": row["media_path"],
-                 "media_type": row["media_type"],
-                 "query": row["query"],
-                 "answer": row["answer"],
-                 "note": row["note"],
-             }
 
+ import json
+ import datasets
+
+ _DESCRIPTION = """Titan CV Agent Benchmark: evaluates an agent's reasoning and decision-making ability on complex visual tasks. Contains image and video samples together with paired question-answering tasks."""
+ _HOMEPAGE = "https://huggingface.co/datasets/DataCanvasAILab/Titan-CV-Agent-Benchmark"
+ _LICENSE = "MIT"
+ _DATA_URL = "https://huggingface.co/datasets/DataCanvasAILab/Titan-CV-Agent-Benchmark/resolve/main/titan_agent_benchmark_v3.json"
+
+ class Dataset(datasets.GeneratorBasedBuilder):  # ← key change: class renamed to Dataset
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             features=datasets.Features({
+                 "id": datasets.Value("string"),
+                 "media_path": datasets.Value("string"),
+                 "media_type": datasets.ClassLabel(names=["image", "video"]),
+                 "query": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+                 "note": datasets.Value("string"),
+             }),
+         )
+
+     def _split_generators(self, dl_manager):
+         data_path = dl_manager.download(_DATA_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepath": data_path},
+             )
+         ]
+
+     def _generate_examples(self, filepath):
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+         for idx, row in enumerate(data):
+             yield idx, {
+                 "id": row["id"],
+                 "media_path": row["media_path"],
+                 "media_type": row["media_type"],
+                 "query": row["query"],
+                 "answer": row["answer"],
+                 "note": row["note"],
+             }
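
With the builder in place, here is a minimal usage sketch (not part of this commit) for loading the benchmark through the updated script; trust_remote_code=True is needed because the datasets library executes dataset.py locally, and the single "train" split is assumed from _split_generators above:

from datasets import load_dataset

# Load the benchmark via the custom loading script in this repo.
ds = load_dataset(
    "DataCanvasAILab/Titan-CV-Agent-Benchmark",
    split="train",
    trust_remote_code=True,
)
print(ds)               # columns: id, media_path, media_type, query, answer, note
print(ds[0]["query"])   # first query string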
dataset_info.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "default": {
+     "description": "Titan CV Agent Benchmark: evaluates an agent's reasoning and decision-making ability on complex visual tasks.",
+     "features": {
+       "id": {"dtype": "string", "_type": "Value"},
+       "media_path": {"dtype": "string", "_type": "Value"},
+       "media_type": {"names": ["image", "video"], "_type": "ClassLabel"},
+       "query": {"dtype": "string", "_type": "Value"},
+       "answer": {"dtype": "string", "_type": "Value"},
+       "note": {"dtype": "string", "_type": "Value"}
+     },
+     "homepage": "https://huggingface.co/datasets/DataCanvasAILab/Titan-CV-Agent-Benchmark",
+     "license": "MIT",
+     "citation": ""
+   }
+ }
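
The "features" block above uses the serialized schema format the datasets library understands; a small sketch (an assumption about local inspection, not part of this commit) rebuilds it with Features.from_dict so it can be checked against the schema declared in dataset.py:

import json
from datasets import Features

# Read the newly added metadata file and rebuild the feature schema from it.
with open("dataset_info.json", encoding="utf-8") as f:
    info = json.load(f)

features = Features.from_dict(info["default"]["features"])
print(features)  # id/media_path/query/answer/note as string Values, media_type as a ClassLabel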
titan_agent_benchmark_v3.json ADDED
The diff for this file is too large to render. See raw diff
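
Since the raw file cannot be rendered here, a sketch for inspecting it directly (assuming it keeps the flat list-of-records layout that _generate_examples reads):

import json
from huggingface_hub import hf_hub_download

# Download the raw benchmark JSON from the dataset repo and peek at the records.
path = hf_hub_download(
    repo_id="DataCanvasAILab/Titan-CV-Agent-Benchmark",
    filename="titan_agent_benchmark_v3.json",
    repo_type="dataset",
)
with open(path, encoding="utf-8") as f:
    records = json.load(f)

print(len(records))       # number of benchmark samples
print(records[0].keys())  # expected keys: id, media_path, media_type, query, answer, note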