gsaltintas committed (verified)
Commit 3d3512c · Parent: 5b31429

Uploading tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space subset

README.md CHANGED
@@ -416,6 +416,40 @@ dataset_info:
     num_examples: 33
   download_size: 7768
   dataset_size: 6125
+- config_name: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  splits:
+  - name: test
+    num_bytes: 8831
+    num_examples: 40
+  download_size: 8368
+  dataset_size: 8831
 configs:
 - config_name: tokenizer_robustness_completion_chinese_canonical
   data_files:
@@ -465,6 +499,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_chinese_traditional/test-*
+- config_name: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
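The added `configs` entry makes the new subset loadable by its config name. A minimal sketch using the standard `datasets` API; the repository ID below is a placeholder, since this commit excerpt does not show the full repo path, while the config name and expected counts come from the YAML above:

```python
from datasets import load_dataset

CONFIG = "tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space"

# Placeholder repo id -- substitute the actual <namespace>/<dataset> path on the Hub.
ds = load_dataset("gsaltintas/tokenizer-robustness", CONFIG, split="test")

print(len(ds))       # 40 examples, per the splits metadata added in dataset_info
print(ds.features)   # question, choices, answer, answer_label, split, subcategories, ...
```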
tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/test-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b89c8545accd3b9b6bc50a3e8f3a5a3f9fb3e3dd5e55f6f6345cb6f39e658806
-size 7633
+oid sha256:50dcc67af0996e63aa2c9256524306e51a4295e897a21354f45005a6a05e8cfa
+size 8368
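The parquet shard is stored via Git LFS, so the diff above only updates the pointer file (sha256 oid and byte size). A small sketch for checking a locally downloaded shard against that pointer; the local path is assumed to mirror the repository layout:

```python
import hashlib

# Assumed local path mirroring the repository layout.
path = (
    "tokenizer_robustness_completion_chinese_word_spacing_zero-width_characters_extra_space/"
    "test-00000-of-00001.parquet"
)

with open(path, "rb") as f:
    data = f.read()

print(len(data))                         # expected size from the pointer: 8368
print(hashlib.sha256(data).hexdigest())  # expected oid:
# 50dcc67af0996e63aa2c9256524306e51a4295e897a21354f45005a6a05e8cfa
```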