Uploading tokenizer_robustness_completion_chinese_ocr_errors subset
README.md CHANGED
@@ -178,6 +178,40 @@ dataset_info:
     num_examples: 40
   download_size: 8251
   dataset_size: 7340
+- config_name: tokenizer_robustness_completion_chinese_ocr_errors
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  splits:
+  - name: test
+    num_bytes: 8441
+    num_examples: 40
+  download_size: 8307
+  dataset_size: 8441
 configs:
 - config_name: tokenizer_robustness_completion_chinese_canonical
   data_files:
@@ -199,6 +233,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_keyboard_proximity_errors/test-*
+- config_name: tokenizer_robustness_completion_chinese_ocr_errors
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_ocr_errors/test-*
 ---

 # Dataset Card for Tokenization Robustness
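For reference, a minimal sketch of loading the subset this commit adds, using the `datasets` library. The repository id below is a placeholder (the commit page does not show it); the config name, split, feature names, and example count all come from the diff above.

# Load the new OCR-errors config; "org/dataset-name" is a hypothetical repo id.
from datasets import load_dataset

ds = load_dataset(
    "org/dataset-name",  # placeholder -- substitute the actual repository id
    "tokenizer_robustness_completion_chinese_ocr_errors",
    split="test",
)

print(len(ds))  # 40, per num_examples in the dataset_info block
row = ds[0]
print(row["question"], row["choices"], row["answer"], row["answer_label"])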
tokenizer_robustness_completion_chinese_ocr_errors/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e498920b69c5631dd74105826f7a898c80b9910747620aaaf69e080749491763
+size 8307
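The parquet file itself is stored through Git LFS, so the commit only adds the three-line pointer above. A sketch of verifying a downloaded copy against that pointer, assuming the local path below, recomputes the byte size and sha256 digest:

import hashlib
from pathlib import Path

# Hypothetical local path; adjust to wherever the file was fetched.
path = Path("tokenizer_robustness_completion_chinese_ocr_errors/test-00000-of-00001.parquet")
data = path.read_bytes()

assert len(data) == 8307  # matches the pointer's "size" line
expected = "e498920b69c5631dd74105826f7a898c80b9910747620aaaf69e080749491763"
assert hashlib.sha256(data).hexdigest() == expected  # matches "oid sha256:"

Note that the pointer's size (8307 bytes) is the same figure recorded as download_size in the README's dataset_info block.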