|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Data Loader for Turku Paraphrase Corpus""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\ |
|
@inproceedings{kanerva-etal-2021-finnish, |
|
title = {Finnish Paraphrase Corpus}, |
|
author = {Kanerva, Jenna and Ginter, Filip and Chang, Li-Hsin and Rastas, Iiro and Skantsi, Valtteri and Kilpeläinen, Jemina and Kupari, Hanna-Mari and Saarni, Jenna and Sevón, Maija and Tarkka, Otto}, |
|
booktitle = {Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa'21)}, |
|
year = {2021}, |
|
publisher = {Linköping University Electronic Press, Sweden}, |
|
url = {https://aclanthology.org/2021.nodalida-main.29}, |
|
pages = {288--298} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
Turku Paraphrase Corpus is a dataset of 104,645 manually annotated Finnish paraphrases. The vast majority of the data is classified as a paraphrase either in the given context, or universally. |
|
""" |
|
|
|
_HOMEPAGE = "https://turkunlp.org/paraphrase.html" |
|
|
|
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)" |
|
|
|
|
|
|
|
|
|
_URLs = { |
|
'train': 'https://raw.githubusercontent.com/TurkuNLP/Turku-paraphrase-corpus/main/data-fi/train.json', |
|
'validation': 'https://raw.githubusercontent.com/TurkuNLP/Turku-paraphrase-corpus/main/data-fi/dev.json', |
|
'test': 'https://raw.githubusercontent.com/TurkuNLP/Turku-paraphrase-corpus/main/data-fi/test.json' |
|
} |
|
|
|
|
|
|
|
class TurkuParaphraseCorpus(datasets.GeneratorBasedBuilder):
    """Turku Paraphrase Corpus is a dataset of 104,645 manually annotated Finnish paraphrases."""

    VERSION = datasets.Version("1.1.0")

    # Three views of the same data: "plain" (as annotated), "classification"
    # (each pair also emitted in flipped order), and "generation"
    # (input/target pairs, non-paraphrase-like labels filtered out).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="plain", version=VERSION, description="This loads the dataset in its plain format without any additional data transformations. In case of applying the dataset to a task (e.g. paraphrase classification or generation), some additional data transformations are suggested depending on the task (see 'classification' and 'generation' for ready made transformations for paraphrase classification and paraphrase generation)."),
        datasets.BuilderConfig(name="classification", version=VERSION, description="This loads the dataset in a format directly suitable for paraphrase classification. Each example is introduced twice with different order of the text passages, (text1, text2, label) and (text2, text1, label)"),
        datasets.BuilderConfig(name="generation", version=VERSION, description="This loads the dataset in a format suitable for paraphrase generation, where examples not considered suitable for generation models are discarded. Paraphrases without directionality are generated in both directions, while directional paraphrases (subsumption flag) are only generated from more detailed to more general one. Labels 2 (related but not a paraphrase), 3 (context dependent paraphrase), flag i (minor deviation), and flag s (style difference) are discarded."),
    ]

    def _info(self):
        """Build the DatasetInfo; the feature schema depends on the chosen config.

        The "generation" config exposes a single (input, target) text pair,
        while "plain"/"classification" keep both passages as text1/text2.
        """
        if self.config.name == "generation":
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "goeswith": datasets.Value("string"),
                    "fold": datasets.Value("int32"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                    "label": datasets.Value("string"),
                    "binary_label": datasets.Value("string"),
                    "is_rewrite": datasets.Value("bool"),
                }
            )
        else:
            features = datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "goeswith": datasets.Value("string"),
                    "fold": datasets.Value("int32"),
                    "text1": datasets.Value("string"),
                    "text2": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                    "label": datasets.Value("string"),
                    "binary_label": datasets.Value("string"),
                    "is_rewrite": datasets.Value("bool"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads the three split JSON files and wires each local path into
        _generate_examples together with the split name (used for gem_id).
        """
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["validation"], "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"], "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples.

        One corpus entry may expand into several examples (flipped pairs in
        the "classification" config, rewrites, both generation directions);
        each emitted example receives a unique, split-scoped gem_id.
        """
        with open(filepath, "rt", encoding="utf-8") as f:
            data = json.load(f)
        counter = 0
        for example in data:
            if self.config.name == "generation":
                examples = self._prepare_for_generation(example)
            else:
                examples = self._prepare_plain_and_classification(example)
            for e in examples:
                e["gem_id"] = f"gem-turku_paraphrase_corpus-{split}-{counter}"
                yield counter, e
                counter += 1

    def _skip_in_generation(self, label):
        """Define here which examples should be skipped when doing paraphrase generation.

        Returns True when the label contains 2 (related but not a paraphrase),
        3 (context dependent), flag i (minor deviation) or flag s (style
        difference) -- all unsuitable for training generation models.
        """
        return any(marker in label for marker in ("2", "3", "i", "s"))

    def _prepare_for_generation(self, orig_example):
        """Turn examples into generation format.

        Produces zero or more (input, target) examples: directional labels
        ('>'/'<') are generated only from the more detailed passage to the
        more general one; undirected paraphrases go in both directions;
        rewrites are always emitted in both directions.
        """
        processed = []
        d = {
            "gem_id": "placeholder",  # filled in later by _generate_examples
            "goeswith": orig_example["goeswith"] if orig_example["goeswith"] is not None else "not available",
            "fold": orig_example["fold"],
            "input": orig_example["txt1"],
            "target": orig_example["txt2"],
            "label": orig_example["label"],
            "binary_label": "positive" if orig_example["label"] != "2" else "negative",
            "is_rewrite": False,
        }

        label = d["label"]
        if not self._skip_in_generation(label):
            if ">" in label:
                # txt1 > txt2: keep only the detailed -> general direction.
                processed.append(d)
            elif "<" in label:
                # txt1 < txt2: generate in the opposite direction only.
                processed.append(self._flip_example(d, "input", "target"))
            else:
                # No directionality: usable in both directions.
                processed.append(d)
                processed.append(self._flip_example(d, "input", "target"))

        # Manual rewrites are full paraphrases (label "4"); even when the
        # original pair is skipped above, its rewrites are still usable.
        for rew in orig_example["rewrites"]:
            r = self._generate_rew(d, rew, "input", "target")
            processed.append(r)
            processed.append(self._flip_example(r, "input", "target"))

        for i in range(len(processed)):
            processed[i]["references"] = [processed[i]["target"]]
        return processed

    def _prepare_plain_and_classification(self, orig_example):
        """Turn examples into classification format.

        In the "plain" config each pair (and each rewrite) is emitted once;
        in the "classification" config each is additionally emitted with the
        passages swapped (and directional label flags mirrored).
        """
        processed = []
        d = {
            "gem_id": "placeholder",  # filled in later by _generate_examples
            "goeswith": orig_example["goeswith"] if orig_example["goeswith"] is not None else "not available",
            "fold": orig_example["fold"],
            "text1": orig_example["txt1"],
            "text2": orig_example["txt2"],
            "label": orig_example["label"],
            "binary_label": "positive" if orig_example["label"] != "2" else "negative",
            "is_rewrite": False,
        }
        processed.append(d)
        if self.config.name == "classification":
            processed.append(self._flip_example(d, "text1", "text2"))
        for rew in orig_example["rewrites"]:
            r = self._generate_rew(d, rew, "text1", "text2")
            processed.append(r)
            if self.config.name == "classification":
                # BUGFIX: flip the rewrite `r`, not the original `d` -- the
                # previous code appended yet another flipped copy of the
                # original pair per rewrite and never emitted the flipped
                # rewrite (the generation path above already flips `r`).
                processed.append(self._flip_example(r, "text1", "text2"))

        # Classification-style targets: the label itself, no references.
        for i in range(len(processed)):
            processed[i]["references"] = []
            processed[i]["target"] = processed[i]["label"]

        return processed

    def _generate_rew(self, orig, rew, field1, field2):
        """Turn rewrite into individual example.

        `rew` is a (rewritten_text1, rewritten_text2) pair; metadata is
        inherited from the originating example `orig`, and the label is
        always "4" (full paraphrase) since rewrites are made to be such.
        """
        d = {
            "gem_id": "placeholder",  # filled in later by _generate_examples
            "goeswith": orig["goeswith"],
            "fold": orig["fold"],
            field1: rew[0],
            field2: rew[1],
            "label": "4",
            "binary_label": "positive",
            "is_rewrite": True,
        }
        return d

    def _flip_example(self, example, field1, field2):
        """Flip the example (text1, text2, label) --> (text2, text1, label).

        The directional subsumption flags '<' and '>' are mirrored so they
        keep pointing from the more detailed to the more general passage.
        """
        flipped = example.copy()
        if "<" in example["label"]:
            flipped["label"] = example["label"].replace("<", ">")
        if ">" in example["label"]:
            flipped["label"] = example["label"].replace(">", "<")
        flipped[field1] = example[field2]
        flipped[field2] = example[field1]
        return flipped
|
|
|
|