File size: 1,410 Bytes
df9abf4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
import os
import glob
import json
from pathlib import Path
from collections import OrderedDict


# Maps each supported reconstruction model name to its pretrained-checkpoint
# identifier. NOTE(review): identifiers look like "<run>_<step>" strings used
# to locate saved weights on disk — confirm the format against the loader.
pretrained_models_dict = {'E2VID': "0000_000012", 'FireNet': "0000_000020",
                          'E2VID+': "0000_000000", 'FireNet+': "0000_000002",
                          'ET-Net': "0000_000030", 'SSL_E2VID': "0000_000040",
                          'SPADE_E2VID': "0000_000050"}

# Full-reference metrics (require a ground-truth image to compare against).
full_ref_metrics = ['mse', 'ssim', 'lpips']
# No-reference (blind) image-quality metrics (scored from the image alone).
no_ref_metrics = ['brisque', 'niqe', 'maniqa']


def read_json(fname):
    """Load a JSON file, preserving the key order of each object.

    Parameters
    ----------
    fname : str or Path
        Path to the JSON file to read.

    Returns
    -------
    OrderedDict
        Parsed JSON content; every JSON object becomes an OrderedDict
        with keys in file order.

    Raises
    ------
    FileNotFoundError
        If *fname* does not exist.
    json.JSONDecodeError
        If the file is not valid JSON.
    """
    fname = Path(fname)
    # Explicit UTF-8 so parsing does not depend on the platform locale encoding.
    with fname.open('rt', encoding='utf-8') as handle:
        return json.load(handle, object_hook=OrderedDict)


def get_filenames(base_path, filename_pattern):
    """Return the stem (filename without extension) of every file under
    *base_path* that matches *filename_pattern*.

    Parameters
    ----------
    base_path : str or Path
        Directory to search in (non-recursive).
    filename_pattern : str
        Glob pattern, e.g. ``"*.json"``.

    Returns
    -------
    list of str
        Matching file stems; order is filesystem-dependent, as with
        ``glob.glob``.
    """
    # The file already favors pathlib; Path.glob replaces the previous
    # os.path.join + glob.glob + append-loop with one comprehension.
    return [path.stem for path in Path(base_path).glob(filename_pattern)]


def get_dataset_config_names(data_configs_path):
    """Return the stem names of all JSON dataset configs in *data_configs_path*.

    Each dataset is described by one ``<name>.json`` file; the returned
    list holds the ``<name>`` parts.
    """
    return get_filenames(data_configs_path, "*.json")


def get_sequence_names(data_configs_path, dataset_name):
    """Return the sequence names listed in a dataset's JSON config.

    Parameters
    ----------
    data_configs_path : str
        Directory containing the dataset config files.
    dataset_name : str
        Dataset name; the config is expected at
        ``<data_configs_path>/<dataset_name>.json``.

    Returns
    -------
    list
        One entry per item under the config's ``'sequences'`` key.
    """
    config_file = os.path.join(data_configs_path, dataset_name + ".json")
    dataset_config = read_json(config_file)
    # list() over the 'sequences' entry is equivalent to appending each
    # item in a loop: for a dict it yields the keys, for a list the items.
    return list(dataset_config['sequences'])