### This is an example of the script that will be run in the test environment.
### You can change the rest of the code to define and test your solution.
### However, you should not change the signature of the provided function.
### The script saves "submission.parquet" file in the current directory.
### You can use any additional files and subdirectories to organize your code.
import json
import os
from pathlib import Path
from typing import Dict

import numpy as np
import pandas as pd
from datasets import load_dataset
from tqdm import tqdm


def empty_solution(sample):
    '''Return a minimal valid solution, i.e. 2 vertices and 1 edge.'''
    return np.zeros((2, 3)), [(0, 1)]
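

# Optional sanity-check sketch (an assumption for illustration only; this helper
# is not part of the required interface and is never called by the harness).
# It verifies that a predicted wireframe matches the submission format:
# an (N, 3) array of vertex coordinates and a list of (i, j) index pairs into it.
def check_wireframe(vertices, edges):
    '''Return True if (vertices, edges) looks like a valid wireframe prediction.'''
    vertices = np.asarray(vertices)
    if vertices.ndim != 2 or vertices.shape[1] != 3 or len(vertices) < 2:
        return False
    # every edge must join two distinct, existing vertex indices
    return all(
        0 <= i < len(vertices) and 0 <= j < len(vertices) and i != j
        for i, j in edges
    )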


class Sample(Dict):
    '''Dict whose repr shows array shapes and element types instead of full contents.'''

    def pick_repr_data(self, x):
        if hasattr(x, 'shape'):
            return x.shape
        if isinstance(x, (str, float, int)):
            return x
        if isinstance(x, list):
            return [type(x[0])] if len(x) > 0 else []
        return type(x)

    def __repr__(self):
        return str({k: self.pick_repr_data(v) for k, v in self.items()})
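
# Usage sketch (illustrative only; the real keys and shapes come from the
# dataset, the ones below are made up):
#
#     s = Sample({'order_id': 'abc', 'points': np.zeros((1024, 3)), 'tags': ['roof']})
#     print(s)  # {'order_id': 'abc', 'points': (1024, 3), 'tags': [<class 'str'>]}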


if __name__ == "__main__":
    print("------------ Loading dataset ------------")
    # competition parameters (e.g. the dataset repo id) are provided via params.json
    param_path = Path('params.json')
    print(param_path)
    with param_path.open() as f:
        params = json.load(f)
    print(params)
    # debug output: show the working directory and what is mounted under /tmp/data
    print('pwd:')
    os.system('pwd')
    print(os.system('ls -lahtr'))
    print('/tmp/data/')
    print(os.system('ls -lahtr /tmp/data/'))
    print('/tmp/data/data')
    print(os.system('ls -lahtrR /tmp/data/data'))

    data_path_test_server = Path('/tmp/data')
    data_path_local = Path().home() / '.cache/huggingface/datasets/usm3d___hoho25k_test_x/'

    if data_path_test_server.exists():
        # on the test server the data is already mounted under /tmp/data
        TEST_ENV = True
    else:
        # locally, download the dataset snapshot into the same location first
        TEST_ENV = False
        from huggingface_hub import snapshot_download
        _ = snapshot_download(
            repo_id=params['dataset'],
            local_dir="/tmp/data",
            repo_type="dataset",
        )
    data_path = data_path_test_server

    print(data_path)
    # dataset = load_dataset(params['dataset'], trust_remote_code=True, use_auth_token=params['token'])
    # data_files = {
    #     "validation": [str(p) for p in [*data_path.rglob('*validation*.arrow')]+[*data_path.rglob('*public*/**/*.tar')]],
    #     "test": [str(p) for p in [*data_path.rglob('*test*.arrow')]+[*data_path.rglob('*private*/**/*.tar')]],
    # }
    data_files = {
        "validation": [str(p) for p in data_path.rglob('*public*/**/*.tar')],
        "test": [str(p) for p in data_path.rglob('*private*/**/*.tar')],
    }
    print(data_files)
    dataset = load_dataset(
        str(data_path / 'hoho25k_test_x.py'),
        data_files=data_files,
        trust_remote_code=True,
        writer_batch_size=100,
    )
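    # Inspection sketch (optional; the attribute names assume the standard
    # datasets.DatasetDict / Dataset API returned by a non-streaming load_dataset):
    #
    #     for split in dataset:
    #         print(split, dataset[split].num_rows, dataset[split].column_names)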
    # if TEST_ENV:
    #     dataset = load_dataset(
    #         "webdataset",
    #         data_files=data_files,
    #         trust_remote_code=True,
    #         # streaming=True
    #     )
    print('load with webdataset')
    # else:
    #     dataset = load_dataset(
    #         "arrow",
    #         data_files=data_files,
    #         trust_remote_code=True,
    #         # streaming=True
    #     )
    #     print('load with arrow')
    print(dataset, flush=True)

    print('------------ Now you can do your solution ---------------')
    solution = []
    for subset_name in dataset:
        for i, sample in enumerate(tqdm(dataset[subset_name])):
            # replace this with your solution
            print(Sample(sample), flush=True)
            print('------')
            pred_vertices, pred_edges = empty_solution(sample)
            solution.append({
                'order_id': sample['order_id'],
                'wf_vertices': pred_vertices.tolist(),
                'wf_edges': pred_edges,
            })
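
    # To plug in a real method, the call to empty_solution above would be
    # replaced with something like the following sketch (predict_wireframe is a
    # hypothetical function, not provided here), keeping the same return
    # convention of an (N, 3) vertex array and a list of edge index pairs:
    #
    #     pred_vertices, pred_edges = predict_wireframe(sample)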

    print('------------ Saving results ---------------')
    sub = pd.DataFrame(solution, columns=["order_id", "wf_vertices", "wf_edges"])
    sub.to_parquet("submission.parquet")
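
    # Optional read-back sanity check (a sketch, assuming pandas can read back the
    # file it just wrote; not required by the evaluation harness):
    check = pd.read_parquet("submission.parquet")
    assert list(check.columns) == ["order_id", "wf_vertices", "wf_edges"]
    print(f"submission rows: {len(check)}", flush=True)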
    print("------------ Done ------------")