Modalities: Text
Formats: json
Languages: Polish
Size: < 1K
Libraries: Datasets, pandas
License: unspecified

Commit a5bb418 (verified) by kwojtasik · 1 parent: 0c67752

Delete ComplexQA.py

Files changed (1)
  1. ComplexQA.py +0 -154
ComplexQA.py DELETED
@@ -1,154 +0,0 @@
- """Polish Question Answering Dataset"""
-
- import json
- import datasets
-
- _CITATION = """\
- @InProceedings{10.1007/978-3-032-09318-9_18,
- author="Wojtasik, Konrad
- and Domaga{\l}a, Aleksandra
- and Oleksy, Marcin
- and Piasecki, Maciej",
- editor="Nguyen, Ngoc Thanh
- and Dinh Duc Anh, Vu
- and Kozierkiewicz, Adrianna
- and Nguyen Van, Sinh
- and N{\'u}{\~{n}}ez, Manuel
- and Treur, Jan
- and Vossen, Gottfried",
- title="Towards Complex Question Answering in Polish Language",
- booktitle="Computational Collective Intelligence",
- year="2026",
- publisher="Springer Nature Switzerland",
- address="Cham",
- pages="256--268",
- abstract="Reasoning over text is a challenging task, especially if the reasoning requires aggregating information from long context and multiple steps to reach the correct answer. We introduce Complex Question Answering dataset (Complex Q{\&}A Corpus) and its annotation procedure in the Polish language. Our dataset features human-annotated reasoning across extended documents. The questions within this dataset are carefully prepared and undergo rigorous cross-examination. Each complex question is accompanied by auxiliary questions that highlight specific text fragments and information necessary to formulate the final answer. We have identified the main reasoning patterns from our dataset annotation and human evaluation. We also proposed automatic evaluation procedure through the LLM-as-a-Judge paradigm and evaluated the performance of current state-of-the-art models.",
- isbn="978-3-032-09318-9"
- }
- """
-
- _DESCRIPTION = """\
- This dataset contains Polish language questions and answers based on factual documents.
- It includes both simple and complex questions with different reasoning patterns for
- question answering and reading comprehension tasks.
- """
-
- _HOMEPAGE = ""
-
- _LICENSE = ""
-
- _URLS = {
-     "test": "anotated_ComplexQA.jsonl",
- }
-
-
- class PolishQADataset(datasets.GeneratorBasedBuilder):
-     """ComplexQA Dataset"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="default",
-             version=VERSION,
-             description="Default configuration with all questions",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "default"
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "doc_id": datasets.Value("string"),
-                 "doc_text": datasets.Value("string"),
-                 "complex_questions": [
-                     {
-                         "question_id": datasets.Value("string"),
-                         "question_type": datasets.Value("string"),
-                         "question": datasets.Value("string"),
-                         "answer": datasets.Value("string"),
-                         "span": datasets.Sequence(datasets.Value("int32")),
-                         "reasoning_pattern": datasets.Value("string"),
-                     }
-                 ],
-                 "simple_questions": [
-                     {
-                         "question_id": datasets.Value("string"),
-                         "question_type": datasets.Value("string"),
-                         "question": datasets.Value("string"),
-                         "answer": datasets.Value("string"),
-                         "span": datasets.Sequence(datasets.Value("int32")),
-                     }
-                 ],
-             }
-         )
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         urls = _URLS
-         data_dir = dl_manager.download_and_extract(urls)
-
-         # _URLS only provides a "test" file, so only the test split is
-         # generated; requesting "train" or "validation" keys that were
-         # never downloaded would raise a KeyError.
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": data_dir["test"],
-                     "split": "test",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             for idx, line in enumerate(f):
-                 data = json.loads(line)
-
-                 yield idx, {
-                     "doc_id": data["doc_id"],
-                     "doc_text": data["doc_text"],
-                     "complex_questions": [
-                         {
-                             "question_id": q["question_id"],
-                             "question_type": q["question_type"],
-                             "question": q["question"],
-                             "answer": q["answer"],
-                             "span": q["span"],
-                             "reasoning_pattern": q["reasoning_pattern"],
-                         }
-                         for q in data.get("complex_questions", [])
-                     ],
-                     "simple_questions": [
-                         {
-                             "question_id": q["question_id"],
-                             "question_type": q["question_type"],
-                             "question": q["question"],
-                             "answer": q["answer"],
-                             "span": q["span"],
-                         }
-                         for q in data.get("simple_questions", [])
-                     ],
-                 }
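
With the loading script removed, the remaining anotated_ComplexQA.jsonl file can be read through the library's generic "json" builder instead of the deleted PolishQADataset class. A minimal sketch, assuming the file keeps the one-JSON-object-per-line schema the script above expected (the local file path is illustrative):

    from datasets import load_dataset

    # The generic "json" builder parses one JSON object per line and
    # exposes everything under a single default "train" split.
    ds = load_dataset("json", data_files="anotated_ComplexQA.jsonl", split="train")

    # Each record is a document paired with its annotated questions.
    record = ds[0]
    print(record["doc_id"])
    for q in record["complex_questions"]:
        print(q["reasoning_pattern"], "|", q["question"], "->", q["answer"])

Note that without the script the feature types are inferred from the JSON rather than declared, so the span integers will typically come back as int64 instead of the int32 the script specified.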