import datasets
import pandas as pd

_DESCRIPTION = """\
A dataset containing both DGA and normal domain names. The normal domain names were taken from the Alexa top one million domains. An additional 3,161 normal
domains, provided by the Bambenek Consulting feed, were included in the dataset. This latter group is particularly interesting since it consists of suspicious
domain names that were not generated by a DGA. The total number of normal domains in the dataset is therefore 1,003,161. DGA domains were obtained from the
repositories of DGA domains of Andrey Abakumov and John Bambenek; the total number of DGA domains is 1,915,335, corresponding to 51 different malware families.
About 55% of the DGA portion of the dataset is composed of samples from the Banjori, Post, Timba, Cryptolocker, Ramdo and Conficker malware families.
"""
_HOMEPAGE = "https://huggingface.co/datasets/harpomaxx/dga-detection"


class MyDataset(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "domain": datasets.Value("string"),
                    "label": datasets.Value("string"),
                    "class": datasets.Value("int32"),
                }
            ),
            supervised_keys=("domain", "class"),
            homepage=_HOMEPAGE,
        )
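
    # Shape of a single example under this schema (the domain and label values
    # below are hypothetical; actual values come from the CSV):
    #     {"domain": "example.com", "label": "normal", "class": 0}
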
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        csv_url = "https://huggingface.co/datasets/harpomaxx/dga-detection/resolve/main/argencon.csv.gz"
        # Let the download manager fetch and cache the compressed CSV locally.
        csv_path = dl_manager.download(csv_url)

        # All three splits read the same file; _generate_examples() slices it
        # according to the split name passed through gen_kwargs.
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": split,
                },
            )
            for split in ["train", "test", "validation"]
        ]
""""" |
|
The data variable in the _generate_examples() method is a temporary variable that holds the portion of the dataset based on the current split. |
|
The datasets.SplitGenerator in the _split_generators() method is responsible for creating the three different keys ('train', 'test', 'validation').When you load your |
|
dataset using load_dataset(), the Hugging Face Datasets library will automatically call the _split_generators() method to create the three different dataset splits. |
|
Then, it will call the _generate_examples() method for each split separately, passing the corresponding split name as the split argument. |
|
This is how the different keys are created. To clarify, the _generate_examples() method processes one split at a time, and the Datasets library combines the results |
|
to create a final dataset with keys for 'train', 'test', and 'validation'. |
|
""" |
    def _generate_examples(
        self,
        filepath: str,
        split: str,
    ):
        dataset = pd.read_csv(filepath, compression="gzip")

        # Derive the binary class from the label string:
        # 0 for normal domains, 1 for DGA-generated ones.
        dataset["class"] = dataset["label"].apply(lambda x: 0 if "normal" in x else 1)

        # Deterministic 70/20/10 train/test/validation split by row position.
        total_rows = len(dataset)
        train_ratio = 0.7
        test_ratio = 0.2
        train_end = int(train_ratio * total_rows)
        test_end = train_end + int(test_ratio * total_rows)

        if split == "train":
            dataset = dataset.iloc[:train_end]
        elif split == "test":
            dataset = dataset.iloc[train_end:test_end]
        elif split == "validation":
            dataset = dataset.iloc[test_end:]

        # The original DataFrame index is kept, so the yielded example keys
        # remain unique within each split.
        for index, row in dataset.iterrows():
            yield index, {
                "domain": row["domain"],
                "label": row["label"],
                "class": row["class"],
            }
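

# Note on the split logic above: rows are sliced positionally, so the splits are
# deterministic but inherit whatever ordering argencon.csv.gz has. A hedged
# alternative, if class balance across splits matters, would be to shuffle the
# DataFrame with a fixed seed before slicing, e.g.:
#
#     dataset = dataset.sample(frac=1.0, random_state=42).reset_index(drop=True)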