diff --git a/bigbio/hub/hub_repos/cei/README.md b/bigbio/hub/hub_repos/cei/README.md
new file mode 100644
index 00000000..82267544
--- /dev/null
+++ b/bigbio/hub/hub_repos/cei/README.md
@@ -0,0 +1,44 @@
+---
+language:
+  - en
+bigbio_language:
+  - English
+license: gpl-3.0
+multilinguality: monolingual
+bigbio_license_shortname: GPL_3p0
+pretty_name: CEI
+homepage: https://github.com/sb895/chemical-exposure-information-corpus
+bigbio_pubmed: true
+bigbio_public: true
+bigbio_tasks:
+  - TXTCLASS
+---
+
+
+# Dataset Card for Chemical Exposure Information (CEI) Corpus
+
+## Dataset Description
+
+- **Homepage:** https://github.com/sb895/chemical-exposure-information-corpus
+- **Pubmed:** True
+- **Public:** True
+- **Tasks:** TXTCLASS
+
+The Chemical Exposure Information (CEI) Corpus consists of 3661 PubMed abstracts manually annotated by experts
+according to a hierarchical taxonomy of 32 classes. Each sentence in the corpus is assigned zero or more class
+labels.
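+
+## Usage
+
+A minimal loading sketch with the `datasets` library (the Hub path `bigbio/cei` and the
+`trust_remote_code` flag are assumptions; adjust them for your setup):
+
+```python
+from datasets import load_dataset
+
+# Source schema: id, document_id, text, label_text, labels
+ds_source = load_dataset("bigbio/cei", name="cei_source", trust_remote_code=True)
+
+# BigBio text-classification schema: id, document_id, text, labels
+ds_bigbio = load_dataset("bigbio/cei", name="cei_bigbio_text", trust_remote_code=True)
+```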
+
+## Citation Information
+
+```
+@article{larsson2017text,
+  author    = {Larsson, Kristin and Baker, Simon and Silins, Ilona and Guo, Yufan and Stenius, Ulla and Korhonen,
+    Anna and Berglund, Marika},
+  title     = {Text mining for improved exposure assessment},
+  journal   = {PloS one},
+  volume    = {12},
+  year      = {2017},
+  url       = {https://doi.org/10.1371/journal.pone.0173132},
+  doi       = {10.1371/journal.pone.0173132},
+  biburl    = {https://journals.plos.org/plosone/article/citation/bibtex?id=10.1371/journal.pone.0173132},
+  bibsource = {PloS one}
+}
+```
diff --git a/bigbio/hub/hub_repos/cei/bigbiohub.py b/bigbio/hub/hub_repos/cei/bigbiohub.py
new file mode 100644
index 00000000..2af473d3
--- /dev/null
+++ b/bigbio/hub/hub_repos/cei/bigbiohub.py
@@ -0,0 +1,590 @@
+from collections import defaultdict
+from dataclasses import dataclass
+from enum import Enum
+import logging
+from pathlib import Path
+from types import SimpleNamespace
+from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple
+
+import datasets
+
+if TYPE_CHECKING:
+    import bioc
+
+logger = logging.getLogger(__name__)
+
+
+BigBioValues = SimpleNamespace(NULL="<BB_NULL_STR>")
+
+
+@dataclass
+class BigBioConfig(datasets.BuilderConfig):
+    """BuilderConfig for BigBio."""
+
+    name: str = None
+    version: datasets.Version = None
+    description: str = None
+    schema: str = None
+    subset_id: str = None
+
+
+class Tasks(Enum):
+    NAMED_ENTITY_RECOGNITION = "NER"
+    NAMED_ENTITY_DISAMBIGUATION = "NED"
+    EVENT_EXTRACTION = "EE"
+    RELATION_EXTRACTION = "RE"
+    COREFERENCE_RESOLUTION = "COREF"
+    QUESTION_ANSWERING = "QA"
+    TEXTUAL_ENTAILMENT = "TE"
+    SEMANTIC_SIMILARITY = "STS"
+    TEXT_PAIRS_CLASSIFICATION = "TXT2CLASS"
+    PARAPHRASING = "PARA"
+    TRANSLATION = "TRANSL"
+    SUMMARIZATION = "SUM"
+    TEXT_CLASSIFICATION = "TXTCLASS"
+
+
+entailment_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "premise": datasets.Value("string"),
+        "hypothesis": datasets.Value("string"),
+        "label": datasets.Value("string"),
+    }
+)
+
+pairs_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "document_id": datasets.Value("string"),
+        "text_1": datasets.Value("string"),
+        "text_2": datasets.Value("string"),
+        "label": datasets.Value("string"),
+    }
+)
+
+qa_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "question_id": datasets.Value("string"),
+        "document_id": datasets.Value("string"),
+        "question": datasets.Value("string"),
+        "type": datasets.Value("string"),
+        "choices": [datasets.Value("string")],
+        "context": datasets.Value("string"),
+        "answer": datasets.Sequence(datasets.Value("string")),
+    }
+)
+
+text_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "document_id": datasets.Value("string"),
+        "text": datasets.Value("string"),
+        "labels": [datasets.Value("string")],
+    }
+)
+
+text2text_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "document_id": datasets.Value("string"),
+        "text_1": datasets.Value("string"),
+        "text_2": datasets.Value("string"),
+        "text_1_name": datasets.Value("string"),
+        "text_2_name": datasets.Value("string"),
+    }
+)
+
+kb_features = datasets.Features(
+    {
+        "id": datasets.Value("string"),
+        "document_id": datasets.Value("string"),
+        "passages": [
+            {
+                "id": datasets.Value("string"),
+                "type": datasets.Value("string"),
+                "text": datasets.Sequence(datasets.Value("string")),
+                "offsets": datasets.Sequence([datasets.Value("int32")]),
+            }
+        ],
+        "entities": [
+            {
+                "id": datasets.Value("string"),
+                "type": datasets.Value("string"),
+                "text": datasets.Sequence(datasets.Value("string")),
+                "offsets": datasets.Sequence([datasets.Value("int32")]),
+                "normalized": [
+                    {
+                        "db_name": datasets.Value("string"),
+                        "db_id": datasets.Value("string"),
+                    }
+                ],
+            }
+        ],
+        "events": [
+            {
+                "id": datasets.Value("string"),
+                "type": datasets.Value("string"),
+                # refers to the text_bound_annotation of the trigger
+                "trigger": {
+                    "text": datasets.Sequence(datasets.Value("string")),
+                    "offsets": datasets.Sequence([datasets.Value("int32")]),
+                },
+                "arguments": [
+                    {
+                        "role": datasets.Value("string"),
+                        "ref_id": datasets.Value("string"),
+                    }
+                ],
+            }
+        ],
+        "coreferences": [
+            {
+                "id": datasets.Value("string"),
+                "entity_ids": datasets.Sequence(datasets.Value("string")),
+            }
+        ],
+        "relations": [
+            {
+                "id": datasets.Value("string"),
+                "type": datasets.Value("string"),
+                "arg1_id": datasets.Value("string"),
+                "arg2_id": datasets.Value("string"),
+                "normalized": [
+                    {
+                        "db_name": datasets.Value("string"),
+                        "db_id": datasets.Value("string"),
+                    }
+                ],
+            }
+        ],
+    }
+)
+
+
+TASK_TO_SCHEMA = {
+    Tasks.NAMED_ENTITY_RECOGNITION.name: "KB",
+    Tasks.NAMED_ENTITY_DISAMBIGUATION.name: "KB",
+    Tasks.EVENT_EXTRACTION.name: "KB",
+    Tasks.RELATION_EXTRACTION.name: "KB",
+    Tasks.COREFERENCE_RESOLUTION.name: "KB",
+    Tasks.QUESTION_ANSWERING.name: "QA",
+    Tasks.TEXTUAL_ENTAILMENT.name: "TE",
+    Tasks.SEMANTIC_SIMILARITY.name: "PAIRS",
+    Tasks.TEXT_PAIRS_CLASSIFICATION.name: "PAIRS",
+    Tasks.PARAPHRASING.name: "T2T",
+    Tasks.TRANSLATION.name: "T2T",
+    Tasks.SUMMARIZATION.name: "T2T",
+    Tasks.TEXT_CLASSIFICATION.name: "TEXT",
+}
+
+SCHEMA_TO_TASKS = defaultdict(set)
+for task, schema in TASK_TO_SCHEMA.items():
+    SCHEMA_TO_TASKS[schema].add(task)
+SCHEMA_TO_TASKS = dict(SCHEMA_TO_TASKS)
+
+VALID_TASKS = set(TASK_TO_SCHEMA.keys())
+VALID_SCHEMAS = set(TASK_TO_SCHEMA.values())
+
+SCHEMA_TO_FEATURES = {
+    "KB": kb_features,
+    "QA": qa_features,
+    "TE": entailment_features,
+    "T2T": text2text_features,
+    "TEXT": text_features,
+    "PAIRS": pairs_features,
+}
+
+
+def get_texts_and_offsets_from_bioc_ann(ann: "bioc.BioCAnnotation") -> Tuple:
+
+    offsets = [(loc.offset, loc.offset + loc.length) for loc in ann.locations]
+
+    text = ann.text
+
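+    # Discontiguous annotation: heuristically split the concatenated annotation
+    # text into one chunk per location, skipping the spaces between chunks.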
+    if len(offsets) > 1:
+        i = 0
+        texts = []
+        for start, end in offsets:
+            chunk_len = end - start
+            texts.append(text[i : chunk_len + i])
+            i += chunk_len
+            while i < len(text) and text[i] == " ":
+                i += 1
+    else:
+        texts = [text]
+
+    return offsets, texts
+
+
+def remove_prefix(a: str, prefix: str) -> str:
+    if a.startswith(prefix):
+        a = a[len(prefix) :]
+    return a
+
+
+def parse_brat_file(
+    txt_file: Path,
+    annotation_file_suffixes: List[str] = None,
+    parse_notes: bool = False,
+) -> Dict:
+    """
+    Parse a brat file into the schema defined below.
+    `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
+    Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
+    e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.
+    Will include annotator notes, when `parse_notes == True`.
+    brat_features = datasets.Features(
+        {
+            "id": datasets.Value("string"),
+            "document_id": datasets.Value("string"),
+            "text": datasets.Value("string"),
+            "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
+                {
+                    "offsets": datasets.Sequence([datasets.Value("int32")]),
+                    "text": datasets.Sequence(datasets.Value("string")),
+                    "type": datasets.Value("string"),
+                    "id": datasets.Value("string"),
+                }
+            ],
+            "events": [  # E line in brat
+                {
+                    "trigger": datasets.Value(
+                        "string"
+                    ),  # refers to the text_bound_annotation of the trigger,
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "arguments": datasets.Sequence(
+                        {
+                            "role": datasets.Value("string"),
+                            "ref_id": datasets.Value("string"),
+                        }
+                    ),
+                }
+            ],
+            "relations": [  # R line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "head": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "tail": {
+                        "ref_id": datasets.Value("string"),
+                        "role": datasets.Value("string"),
+                    },
+                    "type": datasets.Value("string"),
+                }
+            ],
+            "equivalences": [  # Equiv line in brat
+                {
+                    "id": datasets.Value("string"),
+                    "ref_ids": datasets.Sequence(datasets.Value("string")),
+                }
+            ],
+            "attributes": [  # M or A lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "value": datasets.Value("string"),
+                }
+            ],
+            "normalizations": [  # N lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "resource_name": datasets.Value(
+                        "string"
+                    ),  # Name of the resource, e.g. "Wikipedia"
+                    "cuid": datasets.Value(
+                        "string"
+                    ),  # ID in the resource, e.g. 534366
+                    "text": datasets.Value(
+                        "string"
+                    ),  # Human readable description/name of the entity, e.g. "Barack Obama"
+                }
+            ],
+            ### OPTIONAL: Only included when `parse_notes == True`
+            "notes": [  # # lines in brat
+                {
+                    "id": datasets.Value("string"),
+                    "type": datasets.Value("string"),
+                    "ref_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                }
+            ],
+        },
+        )
+    """
+
+    example = {}
+    example["document_id"] = txt_file.with_suffix("").name
+    with txt_file.open() as f:
+        example["text"] = f.read()
+
+    # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
+    # for event extraction
+    if annotation_file_suffixes is None:
+        annotation_file_suffixes = [".a1", ".a2", ".ann"]
+
+    if len(annotation_file_suffixes) == 0:
+        raise AssertionError(
+            "At least one suffix for the to-be-read annotation files should be given!"
+        )
+
+    ann_lines = []
+    for suffix in annotation_file_suffixes:
+        annotation_file = txt_file.with_suffix(suffix)
+        if annotation_file.exists():
+            with annotation_file.open() as f:
+                ann_lines.extend(f.readlines())
+
+    example["text_bound_annotations"] = []
+    example["events"] = []
+    example["relations"] = []
+    example["equivalences"] = []
+    example["attributes"] = []
+    example["normalizations"] = []
+
+    if parse_notes:
+        example["notes"] = []
+
+    for line in ann_lines:
+        line = line.strip()
+        if not line:
+            continue
+
+        if line.startswith("T"):  # Text bound
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+            ann["offsets"] = []
+            span_str = remove_prefix(fields[1], (ann["type"] + " "))
+            text = fields[2]
+            for span in span_str.split(";"):
+                start, end = span.split()
+                ann["offsets"].append([int(start), int(end)])
+
+            # Heuristically split text of discontiguous entities into chunks
+            ann["text"] = []
+            if len(ann["offsets"]) > 1:
+                i = 0
+                for start, end in ann["offsets"]:
+                    chunk_len = end - start
+                    ann["text"].append(text[i : chunk_len + i])
+                    i += chunk_len
+                    while i < len(text) and text[i] == " ":
+                        i += 1
+            else:
+                ann["text"] = [text]
+
+            example["text_bound_annotations"].append(ann)
+
+        elif line.startswith("E"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            ann["type"], ann["trigger"] = fields[1].split()[0].split(":")
+
+            ann["arguments"] = []
+            for role_ref_id in fields[1].split()[1:]:
+                argument = {
+                    "role": (role_ref_id.split(":"))[0],
+                    "ref_id": (role_ref_id.split(":"))[1],
+                }
+                ann["arguments"].append(argument)
+
+            example["events"].append(ann)
+
+        elif line.startswith("R"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["type"] = fields[1].split()[0]
+
+            ann["head"] = {
+                "role": fields[1].split()[1].split(":")[0],
+                "ref_id": fields[1].split()[1].split(":")[1],
+            }
+            ann["tail"] = {
+                "role": fields[1].split()[2].split(":")[0],
+                "ref_id": fields[1].split()[2].split(":")[1],
+            }
+
+            example["relations"].append(ann)
+
+        # '*' seems to be the legacy way to mark equivalences,
+        # but I couldn't find any info on the current way
+        # this might have to be adapted dependent on the brat version
+        # of the annotation
+        elif line.startswith("*"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["ref_ids"] = fields[1].split()[1:]
+
+            example["equivalences"].append(ann)
+
+        elif line.startswith("A") or line.startswith("M"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+
+            info = fields[1].split()
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+
+            if len(info) > 2:
+                ann["value"] = info[2]
+            else:
+                ann["value"] = ""
+
+            example["attributes"].append(ann)
+
+        elif line.startswith("N"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2]
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            ann["resource_name"] = info[2].split(":")[0]
+            ann["cuid"] = info[2].split(":")[1]
+            example["normalizations"].append(ann)
+
+        elif parse_notes and line.startswith("#"):
+            ann = {}
+            fields = line.split("\t")
+
+            ann["id"] = fields[0]
+            ann["text"] = fields[2] if len(fields) == 3 else BigBioValues.NULL
+
+            info = fields[1].split()
+
+            ann["type"] = info[0]
+            ann["ref_id"] = info[1]
+            example["notes"].append(ann)
+
+    return example
+
+
+def brat_parse_to_bigbio_kb(brat_parse: Dict) -> Dict:
+    """
+    Transform a brat parse (conforming to the standard brat schema) obtained with
+    `parse_brat_file` into a dictionary conforming to the `bigbio-kb` schema (as defined in ../schemas/kb.py)
+    :param brat_parse:
+    """
+
+    unified_example = {}
+
+    # Prefix all ids with document id to ensure global uniqueness,
+    # because brat ids are only unique within their document
+    id_prefix = brat_parse["document_id"] + "_"
+
+    # identical
+    unified_example["document_id"] = brat_parse["document_id"]
+    unified_example["passages"] = [
+        {
+            "id": id_prefix + "_text",
+            "type": "abstract",
+            "text": [brat_parse["text"]],
+            "offsets": [[0, len(brat_parse["text"])]],
+        }
+    ]
+
+    # get normalizations
+    ref_id_to_normalizations = defaultdict(list)
+    for normalization in brat_parse["normalizations"]:
+        ref_id_to_normalizations[normalization["ref_id"]].append(
+            {
+                "db_name": normalization["resource_name"],
+                "db_id": normalization["cuid"],
+            }
+        )
+
+    # separate entities and event triggers
+    unified_example["events"] = []
+    non_event_ann = brat_parse["text_bound_annotations"].copy()
+    for event in brat_parse["events"]:
+        event = event.copy()
+        event["id"] = id_prefix + event["id"]
+        trigger = next(
+            tr
+            for tr in brat_parse["text_bound_annotations"]
+            if tr["id"] == event["trigger"]
+        )
+        if trigger in non_event_ann:
+            non_event_ann.remove(trigger)
+        event["trigger"] = {
+            "text": trigger["text"].copy(),
+            "offsets": trigger["offsets"].copy(),
+        }
+        for argument in event["arguments"]:
+            argument["ref_id"] = id_prefix + argument["ref_id"]
+
+        unified_example["events"].append(event)
+
+    unified_example["entities"] = []
+    anno_ids = [ref_id["id"] for ref_id in non_event_ann]
+    for ann in non_event_ann:
+        entity_ann = ann.copy()
+        entity_ann["id"] = id_prefix + entity_ann["id"]
+        entity_ann["normalized"] = ref_id_to_normalizations[ann["id"]]
+        unified_example["entities"].append(entity_ann)
+
+    # massage relations
+    unified_example["relations"] = []
+    skipped_relations = set()
+    for ann in brat_parse["relations"]:
+        if (
+            ann["head"]["ref_id"] not in anno_ids
+            or ann["tail"]["ref_id"] not in anno_ids
+        ):
+            skipped_relations.add(ann["id"])
+            continue
+        unified_example["relations"].append(
+            {
+                "arg1_id": id_prefix + ann["head"]["ref_id"],
+                "arg2_id": id_prefix + ann["tail"]["ref_id"],
+                "id": id_prefix + ann["id"],
+                "type": ann["type"],
+                "normalized": [],
+            }
+        )
+    if len(skipped_relations) > 0:
+        example_id = brat_parse["document_id"]
+        logger.info(
+            f"Example:{example_id}: The `bigbio_kb` schema allows `relations` only between entities."
+            f" Skip (for now): "
+            f"{list(skipped_relations)}"
+        )
+
+    # get coreferences
+    unified_example["coreferences"] = []
+    for i, ann in enumerate(brat_parse["equivalences"], start=1):
+        is_entity_cluster = True
+        for ref_id in ann["ref_ids"]:
+            if not ref_id.startswith("T"):  # not textbound -> no entity
+                is_entity_cluster = False
+            elif ref_id not in anno_ids:  # event trigger -> no entity
+                is_entity_cluster = False
+        if is_entity_cluster:
+            entity_ids = [id_prefix + i for i in ann["ref_ids"]]
+            unified_example["coreferences"].append(
+                {"id": id_prefix + str(i), "entity_ids": entity_ids}
+            )
+    return unified_example
diff --git a/bigbio/hub/hub_repos/cei/cei.py b/bigbio/hub/hub_repos/cei/cei.py
new file mode 100644
index 00000000..ad110433
--- /dev/null
+++ b/bigbio/hub/hub_repos/cei/cei.py
@@ -0,0 +1,174 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from pathlib import Path
+from typing import Dict, Iterator, List, Tuple
+
+import datasets
+
+from .bigbiohub import BigBioConfig, Tasks, text_features
+
+_DATASETNAME = "cei"
+_DISPLAYNAME = "CEI"
+
+_LANGUAGES = ["English"]
+_LOCAL = False
+_PUBMED = True
+
+_CITATION = """\
+@article{larsson2017text,
+  author    = {Larsson, Kristin and Baker, Simon and Silins, Ilona and Guo, Yufan and Stenius, Ulla and Korhonen, \
+Anna and Berglund, Marika},
+  title     = {Text mining for improved exposure assessment},
+  journal   = {PloS one},
+  volume    = {12},
+  year      = {2017},
+  url       = {https://doi.org/10.1371/journal.pone.0173132},
+  doi       = {10.1371/journal.pone.0173132},
+  biburl    = {https://journals.plos.org/plosone/article/citation/bibtex?id=10.1371/journal.pone.0173132},
+  bibsource = {PloS one}
+}
+"""
+
+_DESCRIPTION = """\
+The Chemical Exposure Information (CEI) Corpus consists of 3661 PubMed publication abstracts manually annotated by \
+experts according to a taxonomy. The taxonomy consists of 32 classes in a hierarchy. Zero or more class labels are \
+assigned to each sentence in the corpus.
+"""
+
+_HOMEPAGE = "https://github.com/sb895/chemical-exposure-information-corpus"
+_LICENSE = "GPL_3p0"
+
+_URLS = {
+    _DATASETNAME: "https://github.com/sb895/chemical-exposure-information-corpus/archive/refs/heads/master.zip",
+}
+
+_SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+
+_SOURCE_VERSION = "1.0.0"
+_BIGBIO_VERSION = "1.0.0"
+
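+# Heuristic for pulling class-label strings out of the raw label files: matches a
+# token starting with "B" or "e" followed by lowercase letters, hyphens, or spaces;
+# leading/trailing "- " characters are stripped when the labels are collected.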
+LABEL_REGEX = re.compile(r"[Be][a-z\-\ ]+")
+
+
+class CeiDataset(datasets.GeneratorBasedBuilder):
+    """The Chemical Exposure Information (CEI) Corpus consists of 3661 PubMed abstracts manually annotated by experts
+    according to a hierarchical taxonomy of 32 classes. Zero or more class labels are assigned to each sentence in the
+    corpus. The labels are found under the "labels" directory and the tokenized text under the "text" directory; the
+    filenames are the corresponding PubMed IDs (PMIDs)."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = [
+        BigBioConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=f"{_DATASETNAME}",
+        ),
+        BigBioConfig(
+            name=f"{_DATASETNAME}_bigbio_text",
+            version=BIGBIO_VERSION,
+            description=f"{_DATASETNAME} BigBio schema",
+            schema="bigbio_text",
+            subset_id=f"{_DATASETNAME}",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "document_id": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "label_text": datasets.Value("string"),
+                    "labels": datasets.Sequence(datasets.Value("string")),
+                }
+            )
+        elif self.config.schema == "bigbio_text":
+            features = text_features
+        else:
+            raise NotImplementedError(f"Schema {self.config.schema} not supported")
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+        urls = _URLS[_DATASETNAME]
+        data_dir = dl_manager.download_and_extract(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "base_dir": (Path(data_dir) / "chemical-exposure-information-corpus-master"),
+                    "split": "train",
+                },
+            )
+        ]
+
+    def _generate_examples(self, base_dir: Path, split: str) -> Iterator[Tuple[str, Dict]]:
+        """Yields examples as (key, example) tuples."""
+
+        text_files = sorted(base_dir.glob("text/*.txt"))
+
+        if self.config.schema == "source":
+            for text_file in text_files:
+                key, example = self._read_example_from_file(text_file)
+                yield key, example
+
+        elif self.config.schema == "bigbio_text":
+            for text_file in text_files:
+                key, example = self._read_example_from_file_in_text_schema(text_file)
+                yield key, example
+
+    def _read_example_from_file(self, text_file: Path) -> Tuple[str, Dict]:
+        with open(text_file, encoding="utf-8") as fp:
+            text = fp.read().rstrip()
+
+        label_file = text_file.parent.parent / "labels" / text_file.name
+        with open(label_file, encoding="utf-8") as fp:
+            label_text = fp.read()
+
+        labels = [match.strip(" -") for match in LABEL_REGEX.findall(label_text)]
+        key = text_file.name.rsplit(".", 1)[0]
+        example = {
+            "id": key,
+            "document_id": key,
+            "text": text,
+            "label_text": label_text,
+            "labels": labels,
+        }
+        return key, example
+
+    def _read_example_from_file_in_text_schema(self, text_file: Path) -> Tuple[str, Dict]:
+        key, example = self._read_example_from_file(text_file)
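+        # The bigbio_text schema has no `label_text` field; keep only the shared keys.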
+        example = {k: v for k, v in example.items() if k in {"id", "document_id", "text", "labels"}}
+        return key, example