# GRCh38.py — HuggingFace Hub dataset loading script
# (source page metadata: repo "GRCh38", author zpn, commit d2a5d42,
#  commit message "feat: dataloading script", file size 3.46 kB)
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import datasets
# You can copy an official description
_DESCRIPTION = """\
A dataset of all autosomal and sex chromosomes sequences from reference assembly GRCh38/hg38 1 and reached a total of 3.2 billion nucleotides.
"""
_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.26"
FILES = ["intervals.jsonl"]
class PubchemSelfies(datasets.GeneratorBasedBuilder):
    """All autosomal and sex chromosome sequences from reference assembly
    GRCh38/hg38, totaling 3.2 billion nucleotides.

    NOTE(review): the class name appears to be copied from a PubChem SELFIES
    script and does not match this GRCh38 dataset; it is kept unchanged so any
    existing references to the builder class keep working.
    """

    VERSION = datasets.Version("1.1.0")

    # Fixed: the `datasets` library reads the plural list attribute
    # `BUILDER_CONFIGS`; the original singular `BUILDER_CONFIG` was silently
    # ignored and a default config was synthesized instead.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            version=VERSION,
            description="A dataset of all autosomal and sex chromosomes sequences from reference assembly GRCh38/hg38 and reached a total of 3.2 billion nucleotides.",
        )
    ]

    def _info(self):
        """Return the dataset metadata: description, feature schema, homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # One record per genomic interval; all columns are strings.
            features=datasets.Features(
                {
                    "chr": datasets.Value("string"),
                    "description": datasets.Value("string"),
                    "seq": datasets.Value("string"),
                    "split": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download the data files and declare a single TRAIN split.

        The train/valid partition is encoded per-row in the "split" column by
        `_generate_examples` rather than as separate dataset splits.
        """
        downloaded_files = dl_manager.download(FILES)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to `_generate_examples`.
                gen_kwargs={"filename": downloaded_files[0]},
            ),
        ]

    def _generate_examples(self, filename):
        """Yield `(key, example)` pairs from a JSON Lines file.

        The integer key is the 0-based row number (required to be unique by
        the `datasets` API; otherwise unimportant).
        """
        # Fixed: seed a dedicated RNG so the ~5% train/valid assignment is
        # reproducible across runs (the original used the unseeded global
        # `random`, giving a different partition every run).
        rng = random.Random(42)
        # Fixed: explicit encoding instead of relying on the platform default.
        with open(filename, encoding="utf-8") as jsonl_file:
            for row, line in enumerate(jsonl_file):
                data = json.loads(line)
                # ~5% of rows are tagged "valid". Tagging per-row (instead of
                # using real splits) is a deliberate hack so the dataset can be
                # loaded in streaming mode.
                split = "valid" if rng.random() < 0.05 else "train"
                yield row, {
                    "chr": data["chr"],
                    "description": data["description"],
                    "seq": data["seq"],
                    "split": split,
                }