| [ | |
| { | |
| "results": { | |
| "arc_challenge": { | |
| "acc,none": 0.5921501706484642, | |
| "acc_stderr,none": 0.014361097288449701, | |
| "acc_norm,none": 0.6339590443686007, | |
| "acc_norm_stderr,none": 0.014077223108470139, | |
| "alias": "arc_challenge" | |
| } | |
| }, | |
| "group_subtasks": { | |
| "arc_challenge": [] | |
| }, | |
| "configs": { | |
| "arc_challenge": { | |
| "task": "arc_challenge", | |
| "group": [ | |
| "ai2_arc" | |
| ], | |
| "dataset_path": "allenai/ai2_arc", | |
| "dataset_name": "ARC-Challenge", | |
| "training_split": "train", | |
| "validation_split": "validation", | |
| "test_split": "test", | |
| "doc_to_text": "Question: {{question}}\nAnswer:", | |
| "doc_to_target": "{{choices.label.index(answerKey)}}", | |
| "doc_to_choice": "{{choices.text}}", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 25, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "acc_norm", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": true, | |
| "doc_to_decontamination_query": "Question: {{question}}\nAnswer:", | |
| "metadata": { | |
| "version": 1 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "arc_challenge": 1 | |
| }, | |
| "n-shot": { | |
| "arc_challenge": 25 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711787477.9239457, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| }, | |
| { | |
| "results": { | |
| "gsm8k": { | |
| "exact_match,strict-match": 0.4783927217589083, | |
| "exact_match_stderr,strict-match": 0.013759618667051773, | |
| "exact_match,flexible-extract": 0.5367702805155421, | |
| "exact_match_stderr,flexible-extract": 0.013735191956468648, | |
| "alias": "gsm8k" | |
| } | |
| }, | |
| "group_subtasks": { | |
| "gsm8k": [] | |
| }, | |
| "configs": { | |
| "gsm8k": { | |
| "task": "gsm8k", | |
| "group": [ | |
| "math_word_problems" | |
| ], | |
| "dataset_path": "gsm8k", | |
| "dataset_name": "main", | |
| "training_split": "train", | |
| "test_split": "test", | |
| "fewshot_split": "train", | |
| "doc_to_text": "Question: {{question}}\nAnswer:", | |
| "doc_to_target": "{{answer}}", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "exact_match", | |
| "aggregation": "mean", | |
| "higher_is_better": true, | |
| "ignore_case": true, | |
| "ignore_punctuation": false, | |
| "regexes_to_ignore": [ | |
| ",", | |
| "\\$", | |
| "(?s).*#### ", | |
| "\\.$" | |
| ] | |
| } | |
| ], | |
| "output_type": "generate_until", | |
| "generation_kwargs": { | |
| "until": [ | |
| "Question:", | |
| "</s>", | |
| "<|im_end|>" | |
| ], | |
| "do_sample": false, | |
| "temperature": 0 | |
| }, | |
| "repeats": 1, | |
| "filter_list": [ | |
| { | |
| "name": "strict-match", | |
| "filter": [ | |
| { | |
| "function": "regex", | |
| "regex_pattern": "#### (\\-?[0-9\\.\\,]+)" | |
| }, | |
| { | |
| "function": "take_first" | |
| } | |
| ] | |
| }, | |
| { | |
| "name": "flexible-extract", | |
| "filter": [ | |
| { | |
| "function": "regex", | |
| "group_select": -1, | |
| "regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)" | |
| }, | |
| { | |
| "function": "take_first" | |
| } | |
| ] | |
| } | |
| ], | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 3 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "gsm8k": 3 | |
| }, | |
| "n-shot": { | |
| "gsm8k": 5 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711781684.2771027, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| }, | |
| { | |
| "results": { | |
| "hellaswag": { | |
| "acc,none": 0.6389165504879506, | |
| "acc_stderr,none": 0.004793330525656218, | |
| "acc_norm,none": 0.8338976299541924, | |
| "acc_norm_stderr,none": 0.00371411888431746, | |
| "alias": "hellaswag" | |
| } | |
| }, | |
| "group_subtasks": { | |
| "hellaswag": [] | |
| }, | |
| "configs": { | |
| "hellaswag": { | |
| "task": "hellaswag", | |
| "group": [ | |
| "multiple_choice" | |
| ], | |
| "dataset_path": "hellaswag", | |
| "training_split": "train", | |
| "validation_split": "validation", | |
| "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n", | |
| "doc_to_text": "{{query}}", | |
| "doc_to_target": "{{label}}", | |
| "doc_to_choice": "choices", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 10, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "acc_norm", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 1 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "hellaswag": 1 | |
| }, | |
| "n-shot": { | |
| "hellaswag": 10 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711783556.4633062, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| }, | |
| { | |
| "results": { | |
| "mmlu": { | |
| "acc,none": 0.6122347243982339, | |
| "acc_stderr,none": 0.003893774654142997, | |
| "alias": "mmlu" | |
| }, | |
| "mmlu_humanities": { | |
| "alias": " - humanities", | |
| "acc,none": 0.5713071200850159, | |
| "acc_stderr,none": 0.0068534756048584345 | |
| }, | |
| "mmlu_formal_logic": { | |
| "alias": " - formal_logic", | |
| "acc,none": 0.42063492063492064, | |
| "acc_stderr,none": 0.04415438226743744 | |
| }, | |
| "mmlu_high_school_european_history": { | |
| "alias": " - high_school_european_history", | |
| "acc,none": 0.7757575757575758, | |
| "acc_stderr,none": 0.032568666616811015 | |
| }, | |
| "mmlu_high_school_us_history": { | |
| "alias": " - high_school_us_history", | |
| "acc,none": 0.7745098039215687, | |
| "acc_stderr,none": 0.029331162294251728 | |
| }, | |
| "mmlu_high_school_world_history": { | |
| "alias": " - high_school_world_history", | |
| "acc,none": 0.7848101265822784, | |
| "acc_stderr,none": 0.02675082699467616 | |
| }, | |
| "mmlu_international_law": { | |
| "alias": " - international_law", | |
| "acc,none": 0.743801652892562, | |
| "acc_stderr,none": 0.03984979653302872 | |
| }, | |
| "mmlu_jurisprudence": { | |
| "alias": " - jurisprudence", | |
| "acc,none": 0.7962962962962963, | |
| "acc_stderr,none": 0.03893542518824846 | |
| }, | |
| "mmlu_logical_fallacies": { | |
| "alias": " - logical_fallacies", | |
| "acc,none": 0.7423312883435583, | |
| "acc_stderr,none": 0.03436150827846917 | |
| }, | |
| "mmlu_moral_disputes": { | |
| "alias": " - moral_disputes", | |
| "acc,none": 0.6936416184971098, | |
| "acc_stderr,none": 0.024818350129436593 | |
| }, | |
| "mmlu_moral_scenarios": { | |
| "alias": " - moral_scenarios", | |
| "acc,none": 0.3877094972067039, | |
| "acc_stderr,none": 0.016295332328155814 | |
| }, | |
| "mmlu_philosophy": { | |
| "alias": " - philosophy", | |
| "acc,none": 0.6945337620578779, | |
| "acc_stderr,none": 0.026160584450140453 | |
| }, | |
| "mmlu_prehistory": { | |
| "alias": " - prehistory", | |
| "acc,none": 0.6697530864197531, | |
| "acc_stderr,none": 0.026168298456732846 | |
| }, | |
| "mmlu_professional_law": { | |
| "alias": " - professional_law", | |
| "acc,none": 0.4602346805736636, | |
| "acc_stderr,none": 0.012729785386598563 | |
| }, | |
| "mmlu_world_religions": { | |
| "alias": " - world_religions", | |
| "acc,none": 0.8187134502923976, | |
| "acc_stderr,none": 0.029547741687640038 | |
| }, | |
| "mmlu_other": { | |
| "alias": " - other", | |
| "acc,none": 0.6765368522690698, | |
| "acc_stderr,none": 0.008076844905366993 | |
| }, | |
| "mmlu_business_ethics": { | |
| "alias": " - business_ethics", | |
| "acc,none": 0.6, | |
| "acc_stderr,none": 0.049236596391733084 | |
| }, | |
| "mmlu_clinical_knowledge": { | |
| "alias": " - clinical_knowledge", | |
| "acc,none": 0.6490566037735849, | |
| "acc_stderr,none": 0.02937364625323469 | |
| }, | |
| "mmlu_college_medicine": { | |
| "alias": " - college_medicine", | |
| "acc,none": 0.6127167630057804, | |
| "acc_stderr,none": 0.03714325906302065 | |
| }, | |
| "mmlu_global_facts": { | |
| "alias": " - global_facts", | |
| "acc,none": 0.35, | |
| "acc_stderr,none": 0.047937248544110196 | |
| }, | |
| "mmlu_human_aging": { | |
| "alias": " - human_aging", | |
| "acc,none": 0.6636771300448431, | |
| "acc_stderr,none": 0.031708824268455 | |
| }, | |
| "mmlu_management": { | |
| "alias": " - management", | |
| "acc,none": 0.7864077669902912, | |
| "acc_stderr,none": 0.040580420156460344 | |
| }, | |
| "mmlu_marketing": { | |
| "alias": " - marketing", | |
| "acc,none": 0.8846153846153846, | |
| "acc_stderr,none": 0.020930193185179323 | |
| }, | |
| "mmlu_medical_genetics": { | |
| "alias": " - medical_genetics", | |
| "acc,none": 0.7, | |
| "acc_stderr,none": 0.046056618647183814 | |
| }, | |
| "mmlu_miscellaneous": { | |
| "alias": " - miscellaneous", | |
| "acc,none": 0.7956577266922095, | |
| "acc_stderr,none": 0.0144191239809319 | |
| }, | |
| "mmlu_nutrition": { | |
| "alias": " - nutrition", | |
| "acc,none": 0.7091503267973857, | |
| "acc_stderr,none": 0.02600480036395213 | |
| }, | |
| "mmlu_professional_accounting": { | |
| "alias": " - professional_accounting", | |
| "acc,none": 0.46808510638297873, | |
| "acc_stderr,none": 0.029766675075873866 | |
| }, | |
| "mmlu_professional_medicine": { | |
| "alias": " - professional_medicine", | |
| "acc,none": 0.6066176470588235, | |
| "acc_stderr,none": 0.02967428828131116 | |
| }, | |
| "mmlu_virology": { | |
| "alias": " - virology", | |
| "acc,none": 0.5180722891566265, | |
| "acc_stderr,none": 0.038899512528272166 | |
| }, | |
| "mmlu_social_sciences": { | |
| "alias": " - social_sciences", | |
| "acc,none": 0.7175820604484888, | |
| "acc_stderr,none": 0.007942844244396587 | |
| }, | |
| "mmlu_econometrics": { | |
| "alias": " - econometrics", | |
| "acc,none": 0.4473684210526316, | |
| "acc_stderr,none": 0.04677473004491199 | |
| }, | |
| "mmlu_high_school_geography": { | |
| "alias": " - high_school_geography", | |
| "acc,none": 0.7777777777777778, | |
| "acc_stderr,none": 0.02962022787479047 | |
| }, | |
| "mmlu_high_school_government_and_politics": { | |
| "alias": " - high_school_government_and_politics", | |
| "acc,none": 0.8652849740932642, | |
| "acc_stderr,none": 0.02463978909770944 | |
| }, | |
| "mmlu_high_school_macroeconomics": { | |
| "alias": " - high_school_macroeconomics", | |
| "acc,none": 0.6333333333333333, | |
| "acc_stderr,none": 0.024433016466052455 | |
| }, | |
| "mmlu_high_school_microeconomics": { | |
| "alias": " - high_school_microeconomics", | |
| "acc,none": 0.6302521008403361, | |
| "acc_stderr,none": 0.03135709599613591 | |
| }, | |
| "mmlu_high_school_psychology": { | |
| "alias": " - high_school_psychology", | |
| "acc,none": 0.8036697247706422, | |
| "acc_stderr,none": 0.017030719339154354 | |
| }, | |
| "mmlu_human_sexuality": { | |
| "alias": " - human_sexuality", | |
| "acc,none": 0.7633587786259542, | |
| "acc_stderr,none": 0.03727673575596914 | |
| }, | |
| "mmlu_professional_psychology": { | |
| "alias": " - professional_psychology", | |
| "acc,none": 0.6519607843137255, | |
| "acc_stderr,none": 0.019270998708223974 | |
| }, | |
| "mmlu_public_relations": { | |
| "alias": " - public_relations", | |
| "acc,none": 0.6818181818181818, | |
| "acc_stderr,none": 0.04461272175910508 | |
| }, | |
| "mmlu_security_studies": { | |
| "alias": " - security_studies", | |
| "acc,none": 0.726530612244898, | |
| "acc_stderr,none": 0.028535560337128445 | |
| }, | |
| "mmlu_sociology": { | |
| "alias": " - sociology", | |
| "acc,none": 0.8208955223880597, | |
| "acc_stderr,none": 0.027113286753111837 | |
| }, | |
| "mmlu_us_foreign_policy": { | |
| "alias": " - us_foreign_policy", | |
| "acc,none": 0.84, | |
| "acc_stderr,none": 0.03684529491774709 | |
| }, | |
| "mmlu_stem": { | |
| "alias": " - stem", | |
| "acc,none": 0.5071360608943863, | |
| "acc_stderr,none": 0.008525934831521783 | |
| }, | |
| "mmlu_abstract_algebra": { | |
| "alias": " - abstract_algebra", | |
| "acc,none": 0.36, | |
| "acc_stderr,none": 0.048241815132442176 | |
| }, | |
| "mmlu_anatomy": { | |
| "alias": " - anatomy", | |
| "acc,none": 0.6074074074074074, | |
| "acc_stderr,none": 0.04218506215368879 | |
| }, | |
| "mmlu_astronomy": { | |
| "alias": " - astronomy", | |
| "acc,none": 0.6578947368421053, | |
| "acc_stderr,none": 0.03860731599316091 | |
| }, | |
| "mmlu_college_biology": { | |
| "alias": " - college_biology", | |
| "acc,none": 0.7083333333333334, | |
| "acc_stderr,none": 0.03800968060554858 | |
| }, | |
| "mmlu_college_chemistry": { | |
| "alias": " - college_chemistry", | |
| "acc,none": 0.43, | |
| "acc_stderr,none": 0.049756985195624284 | |
| }, | |
| "mmlu_college_computer_science": { | |
| "alias": " - college_computer_science", | |
| "acc,none": 0.45, | |
| "acc_stderr,none": 0.05 | |
| }, | |
| "mmlu_college_mathematics": { | |
| "alias": " - college_mathematics", | |
| "acc,none": 0.36, | |
| "acc_stderr,none": 0.048241815132442176 | |
| }, | |
| "mmlu_college_physics": { | |
| "alias": " - college_physics", | |
| "acc,none": 0.3431372549019608, | |
| "acc_stderr,none": 0.04724007352383888 | |
| }, | |
| "mmlu_computer_security": { | |
| "alias": " - computer_security", | |
| "acc,none": 0.74, | |
| "acc_stderr,none": 0.0440844002276808 | |
| }, | |
| "mmlu_conceptual_physics": { | |
| "alias": " - conceptual_physics", | |
| "acc,none": 0.5531914893617021, | |
| "acc_stderr,none": 0.0325005368436584 | |
| }, | |
| "mmlu_electrical_engineering": { | |
| "alias": " - electrical_engineering", | |
| "acc,none": 0.5379310344827586, | |
| "acc_stderr,none": 0.04154659671707548 | |
| }, | |
| "mmlu_elementary_mathematics": { | |
| "alias": " - elementary_mathematics", | |
| "acc,none": 0.40476190476190477, | |
| "acc_stderr,none": 0.025279850397404907 | |
| }, | |
| "mmlu_high_school_biology": { | |
| "alias": " - high_school_biology", | |
| "acc,none": 0.7612903225806451, | |
| "acc_stderr,none": 0.024251071262208837 | |
| }, | |
| "mmlu_high_school_chemistry": { | |
| "alias": " - high_school_chemistry", | |
| "acc,none": 0.458128078817734, | |
| "acc_stderr,none": 0.03505630140785742 | |
| }, | |
| "mmlu_high_school_computer_science": { | |
| "alias": " - high_school_computer_science", | |
| "acc,none": 0.7, | |
| "acc_stderr,none": 0.046056618647183814 | |
| }, | |
| "mmlu_high_school_mathematics": { | |
| "alias": " - high_school_mathematics", | |
| "acc,none": 0.3111111111111111, | |
| "acc_stderr,none": 0.028226446749683515 | |
| }, | |
| "mmlu_high_school_physics": { | |
| "alias": " - high_school_physics", | |
| "acc,none": 0.304635761589404, | |
| "acc_stderr,none": 0.03757949922943342 | |
| }, | |
| "mmlu_high_school_statistics": { | |
| "alias": " - high_school_statistics", | |
| "acc,none": 0.4722222222222222, | |
| "acc_stderr,none": 0.0340470532865388 | |
| }, | |
| "mmlu_machine_learning": { | |
| "alias": " - machine_learning", | |
| "acc,none": 0.48214285714285715, | |
| "acc_stderr,none": 0.047427623612430116 | |
| } | |
| }, | |
| "groups": { | |
| "mmlu": { | |
| "acc,none": 0.6122347243982339, | |
| "acc_stderr,none": 0.003893774654142997, | |
| "alias": "mmlu" | |
| }, | |
| "mmlu_humanities": { | |
| "alias": " - humanities", | |
| "acc,none": 0.5713071200850159, | |
| "acc_stderr,none": 0.0068534756048584345 | |
| }, | |
| "mmlu_other": { | |
| "alias": " - other", | |
| "acc,none": 0.6765368522690698, | |
| "acc_stderr,none": 0.008076844905366993 | |
| }, | |
| "mmlu_social_sciences": { | |
| "alias": " - social_sciences", | |
| "acc,none": 0.7175820604484888, | |
| "acc_stderr,none": 0.007942844244396587 | |
| }, | |
| "mmlu_stem": { | |
| "alias": " - stem", | |
| "acc,none": 0.5071360608943863, | |
| "acc_stderr,none": 0.008525934831521783 | |
| } | |
| }, | |
| "group_subtasks": { | |
| "mmlu_stem": [ | |
| "mmlu_high_school_chemistry", | |
| "mmlu_machine_learning", | |
| "mmlu_high_school_biology", | |
| "mmlu_college_biology", | |
| "mmlu_astronomy", | |
| "mmlu_abstract_algebra", | |
| "mmlu_high_school_computer_science", | |
| "mmlu_electrical_engineering", | |
| "mmlu_college_mathematics", | |
| "mmlu_college_physics", | |
| "mmlu_high_school_statistics", | |
| "mmlu_high_school_physics", | |
| "mmlu_elementary_mathematics", | |
| "mmlu_high_school_mathematics", | |
| "mmlu_anatomy", | |
| "mmlu_computer_security", | |
| "mmlu_college_chemistry", | |
| "mmlu_college_computer_science", | |
| "mmlu_conceptual_physics" | |
| ], | |
| "mmlu_other": [ | |
| "mmlu_professional_medicine", | |
| "mmlu_nutrition", | |
| "mmlu_human_aging", | |
| "mmlu_clinical_knowledge", | |
| "mmlu_professional_accounting", | |
| "mmlu_marketing", | |
| "mmlu_global_facts", | |
| "mmlu_miscellaneous", | |
| "mmlu_college_medicine", | |
| "mmlu_management", | |
| "mmlu_virology", | |
| "mmlu_medical_genetics", | |
| "mmlu_business_ethics" | |
| ], | |
| "mmlu_social_sciences": [ | |
| "mmlu_us_foreign_policy", | |
| "mmlu_high_school_macroeconomics", | |
| "mmlu_high_school_geography", | |
| "mmlu_security_studies", | |
| "mmlu_professional_psychology", | |
| "mmlu_sociology", | |
| "mmlu_econometrics", | |
| "mmlu_high_school_psychology", | |
| "mmlu_high_school_microeconomics", | |
| "mmlu_human_sexuality", | |
| "mmlu_high_school_government_and_politics", | |
| "mmlu_public_relations" | |
| ], | |
| "mmlu_humanities": [ | |
| "mmlu_logical_fallacies", | |
| "mmlu_moral_scenarios", | |
| "mmlu_high_school_european_history", | |
| "mmlu_high_school_world_history", | |
| "mmlu_jurisprudence", | |
| "mmlu_formal_logic", | |
| "mmlu_prehistory", | |
| "mmlu_international_law", | |
| "mmlu_high_school_us_history", | |
| "mmlu_professional_law", | |
| "mmlu_world_religions", | |
| "mmlu_moral_disputes", | |
| "mmlu_philosophy" | |
| ], | |
| "mmlu": [ | |
| "mmlu_humanities", | |
| "mmlu_social_sciences", | |
| "mmlu_other", | |
| "mmlu_stem" | |
| ] | |
| }, | |
| "configs": { | |
| "mmlu_abstract_algebra": { | |
| "task": "mmlu_abstract_algebra", | |
| "task_alias": "abstract_algebra", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "abstract_algebra", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_anatomy": { | |
| "task": "mmlu_anatomy", | |
| "task_alias": "anatomy", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "anatomy", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about anatomy.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_astronomy": { | |
| "task": "mmlu_astronomy", | |
| "task_alias": "astronomy", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "astronomy", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about astronomy.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_business_ethics": { | |
| "task": "mmlu_business_ethics", | |
| "task_alias": "business_ethics", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "business_ethics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about business ethics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_clinical_knowledge": { | |
| "task": "mmlu_clinical_knowledge", | |
| "task_alias": "clinical_knowledge", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "clinical_knowledge", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_biology": { | |
| "task": "mmlu_college_biology", | |
| "task_alias": "college_biology", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_biology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college biology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_chemistry": { | |
| "task": "mmlu_college_chemistry", | |
| "task_alias": "college_chemistry", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_chemistry", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_computer_science": { | |
| "task": "mmlu_college_computer_science", | |
| "task_alias": "college_computer_science", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_computer_science", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college computer science.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_mathematics": { | |
| "task": "mmlu_college_mathematics", | |
| "task_alias": "college_mathematics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_mathematics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_medicine": { | |
| "task": "mmlu_college_medicine", | |
| "task_alias": "college_medicine", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_medicine", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college medicine.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_college_physics": { | |
| "task": "mmlu_college_physics", | |
| "task_alias": "college_physics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "college_physics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about college physics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_computer_security": { | |
| "task": "mmlu_computer_security", | |
| "task_alias": "computer_security", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "computer_security", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about computer security.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_conceptual_physics": { | |
| "task": "mmlu_conceptual_physics", | |
| "task_alias": "conceptual_physics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "conceptual_physics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_econometrics": { | |
| "task": "mmlu_econometrics", | |
| "task_alias": "econometrics", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "econometrics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about econometrics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_electrical_engineering": { | |
| "task": "mmlu_electrical_engineering", | |
| "task_alias": "electrical_engineering", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "electrical_engineering", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_elementary_mathematics": { | |
| "task": "mmlu_elementary_mathematics", | |
| "task_alias": "elementary_mathematics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "elementary_mathematics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_formal_logic": { | |
| "task": "mmlu_formal_logic", | |
| "task_alias": "formal_logic", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "formal_logic", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about formal logic.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_global_facts": { | |
| "task": "mmlu_global_facts", | |
| "task_alias": "global_facts", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "global_facts", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about global facts.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_biology": { | |
| "task": "mmlu_high_school_biology", | |
| "task_alias": "high_school_biology", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_biology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school biology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_chemistry": { | |
| "task": "mmlu_high_school_chemistry", | |
| "task_alias": "high_school_chemistry", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_chemistry", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_computer_science": { | |
| "task": "mmlu_high_school_computer_science", | |
| "task_alias": "high_school_computer_science", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_computer_science", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_european_history": { | |
| "task": "mmlu_high_school_european_history", | |
| "task_alias": "high_school_european_history", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_european_history", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school european history.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_geography": { | |
| "task": "mmlu_high_school_geography", | |
| "task_alias": "high_school_geography", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_geography", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school geography.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_government_and_politics": { | |
| "task": "mmlu_high_school_government_and_politics", | |
| "task_alias": "high_school_government_and_politics", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_government_and_politics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_macroeconomics": { | |
| "task": "mmlu_high_school_macroeconomics", | |
| "task_alias": "high_school_macroeconomics", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_macroeconomics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_mathematics": { | |
| "task": "mmlu_high_school_mathematics", | |
| "task_alias": "high_school_mathematics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_mathematics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_microeconomics": { | |
| "task": "mmlu_high_school_microeconomics", | |
| "task_alias": "high_school_microeconomics", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_microeconomics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_physics": { | |
| "task": "mmlu_high_school_physics", | |
| "task_alias": "high_school_physics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_physics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school physics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_psychology": { | |
| "task": "mmlu_high_school_psychology", | |
| "task_alias": "high_school_psychology", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_psychology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_statistics": { | |
| "task": "mmlu_high_school_statistics", | |
| "task_alias": "high_school_statistics", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_statistics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_us_history": { | |
| "task": "mmlu_high_school_us_history", | |
| "task_alias": "high_school_us_history", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_us_history", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school us history.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_high_school_world_history": { | |
| "task": "mmlu_high_school_world_history", | |
| "task_alias": "high_school_world_history", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "high_school_world_history", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about high school world history.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_human_aging": { | |
| "task": "mmlu_human_aging", | |
| "task_alias": "human_aging", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "human_aging", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about human aging.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_human_sexuality": { | |
| "task": "mmlu_human_sexuality", | |
| "task_alias": "human_sexuality", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "human_sexuality", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_international_law": { | |
| "task": "mmlu_international_law", | |
| "task_alias": "international_law", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "international_law", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about international law.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_jurisprudence": { | |
| "task": "mmlu_jurisprudence", | |
| "task_alias": "jurisprudence", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "jurisprudence", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_logical_fallacies": { | |
| "task": "mmlu_logical_fallacies", | |
| "task_alias": "logical_fallacies", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "logical_fallacies", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_machine_learning": { | |
| "task": "mmlu_machine_learning", | |
| "task_alias": "machine_learning", | |
| "group": "mmlu_stem", | |
| "group_alias": "stem", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "machine_learning", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about machine learning.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_management": { | |
| "task": "mmlu_management", | |
| "task_alias": "management", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "management", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about management.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_marketing": { | |
| "task": "mmlu_marketing", | |
| "task_alias": "marketing", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "marketing", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about marketing.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_medical_genetics": { | |
| "task": "mmlu_medical_genetics", | |
| "task_alias": "medical_genetics", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "medical_genetics", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_miscellaneous": { | |
| "task": "mmlu_miscellaneous", | |
| "task_alias": "miscellaneous", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "miscellaneous", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_moral_disputes": { | |
| "task": "mmlu_moral_disputes", | |
| "task_alias": "moral_disputes", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "moral_disputes", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_moral_scenarios": { | |
| "task": "mmlu_moral_scenarios", | |
| "task_alias": "moral_scenarios", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "moral_scenarios", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_nutrition": { | |
| "task": "mmlu_nutrition", | |
| "task_alias": "nutrition", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "nutrition", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about nutrition.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_philosophy": { | |
| "task": "mmlu_philosophy", | |
| "task_alias": "philosophy", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "philosophy", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about philosophy.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_prehistory": { | |
| "task": "mmlu_prehistory", | |
| "task_alias": "prehistory", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "prehistory", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about prehistory.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_professional_accounting": { | |
| "task": "mmlu_professional_accounting", | |
| "task_alias": "professional_accounting", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "professional_accounting", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_professional_law": { | |
| "task": "mmlu_professional_law", | |
| "task_alias": "professional_law", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "professional_law", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about professional law.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_professional_medicine": { | |
| "task": "mmlu_professional_medicine", | |
| "task_alias": "professional_medicine", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "professional_medicine", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_professional_psychology": { | |
| "task": "mmlu_professional_psychology", | |
| "task_alias": "professional_psychology", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "professional_psychology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_public_relations": { | |
| "task": "mmlu_public_relations", | |
| "task_alias": "public_relations", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "public_relations", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about public relations.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_security_studies": { | |
| "task": "mmlu_security_studies", | |
| "task_alias": "security_studies", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "security_studies", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about security studies.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_sociology": { | |
| "task": "mmlu_sociology", | |
| "task_alias": "sociology", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "sociology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about sociology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_us_foreign_policy": { | |
| "task": "mmlu_us_foreign_policy", | |
| "task_alias": "us_foreign_policy", | |
| "group": "mmlu_social_sciences", | |
| "group_alias": "social_sciences", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "us_foreign_policy", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_virology": { | |
| "task": "mmlu_virology", | |
| "task_alias": "virology", | |
| "group": "mmlu_other", | |
| "group_alias": "other", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "virology", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about virology.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| }, | |
| "mmlu_world_religions": { | |
| "task": "mmlu_world_religions", | |
| "task_alias": "world_religions", | |
| "group": "mmlu_humanities", | |
| "group_alias": "humanities", | |
| "dataset_path": "hails/mmlu_no_train", | |
| "dataset_name": "world_religions", | |
| "test_split": "test", | |
| "fewshot_split": "dev", | |
| "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:", | |
| "doc_to_target": "answer", | |
| "doc_to_choice": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "description": "The following are multiple choice questions (with answers) about world religions.\n\n", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "fewshot_config": { | |
| "sampler": "first_n" | |
| }, | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": false, | |
| "metadata": { | |
| "version": 0 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "mmlu_abstract_algebra": 0, | |
| "mmlu_anatomy": 0, | |
| "mmlu_astronomy": 0, | |
| "mmlu_business_ethics": 0, | |
| "mmlu_clinical_knowledge": 0, | |
| "mmlu_college_biology": 0, | |
| "mmlu_college_chemistry": 0, | |
| "mmlu_college_computer_science": 0, | |
| "mmlu_college_mathematics": 0, | |
| "mmlu_college_medicine": 0, | |
| "mmlu_college_physics": 0, | |
| "mmlu_computer_security": 0, | |
| "mmlu_conceptual_physics": 0, | |
| "mmlu_econometrics": 0, | |
| "mmlu_electrical_engineering": 0, | |
| "mmlu_elementary_mathematics": 0, | |
| "mmlu_formal_logic": 0, | |
| "mmlu_global_facts": 0, | |
| "mmlu_high_school_biology": 0, | |
| "mmlu_high_school_chemistry": 0, | |
| "mmlu_high_school_computer_science": 0, | |
| "mmlu_high_school_european_history": 0, | |
| "mmlu_high_school_geography": 0, | |
| "mmlu_high_school_government_and_politics": 0, | |
| "mmlu_high_school_macroeconomics": 0, | |
| "mmlu_high_school_mathematics": 0, | |
| "mmlu_high_school_microeconomics": 0, | |
| "mmlu_high_school_physics": 0, | |
| "mmlu_high_school_psychology": 0, | |
| "mmlu_high_school_statistics": 0, | |
| "mmlu_high_school_us_history": 0, | |
| "mmlu_high_school_world_history": 0, | |
| "mmlu_human_aging": 0, | |
| "mmlu_human_sexuality": 0, | |
| "mmlu_international_law": 0, | |
| "mmlu_jurisprudence": 0, | |
| "mmlu_logical_fallacies": 0, | |
| "mmlu_machine_learning": 0, | |
| "mmlu_management": 0, | |
| "mmlu_marketing": 0, | |
| "mmlu_medical_genetics": 0, | |
| "mmlu_miscellaneous": 0, | |
| "mmlu_moral_disputes": 0, | |
| "mmlu_moral_scenarios": 0, | |
| "mmlu_nutrition": 0, | |
| "mmlu_philosophy": 0, | |
| "mmlu_prehistory": 0, | |
| "mmlu_professional_accounting": 0, | |
| "mmlu_professional_law": 0, | |
| "mmlu_professional_medicine": 0, | |
| "mmlu_professional_psychology": 0, | |
| "mmlu_public_relations": 0, | |
| "mmlu_security_studies": 0, | |
| "mmlu_sociology": 0, | |
| "mmlu_us_foreign_policy": 0, | |
| "mmlu_virology": 0, | |
| "mmlu_world_religions": 0 | |
| }, | |
| "n-shot": { | |
| "mmlu": 0, | |
| "mmlu_abstract_algebra": 5, | |
| "mmlu_anatomy": 5, | |
| "mmlu_astronomy": 5, | |
| "mmlu_business_ethics": 5, | |
| "mmlu_clinical_knowledge": 5, | |
| "mmlu_college_biology": 5, | |
| "mmlu_college_chemistry": 5, | |
| "mmlu_college_computer_science": 5, | |
| "mmlu_college_mathematics": 5, | |
| "mmlu_college_medicine": 5, | |
| "mmlu_college_physics": 5, | |
| "mmlu_computer_security": 5, | |
| "mmlu_conceptual_physics": 5, | |
| "mmlu_econometrics": 5, | |
| "mmlu_electrical_engineering": 5, | |
| "mmlu_elementary_mathematics": 5, | |
| "mmlu_formal_logic": 5, | |
| "mmlu_global_facts": 5, | |
| "mmlu_high_school_biology": 5, | |
| "mmlu_high_school_chemistry": 5, | |
| "mmlu_high_school_computer_science": 5, | |
| "mmlu_high_school_european_history": 5, | |
| "mmlu_high_school_geography": 5, | |
| "mmlu_high_school_government_and_politics": 5, | |
| "mmlu_high_school_macroeconomics": 5, | |
| "mmlu_high_school_mathematics": 5, | |
| "mmlu_high_school_microeconomics": 5, | |
| "mmlu_high_school_physics": 5, | |
| "mmlu_high_school_psychology": 5, | |
| "mmlu_high_school_statistics": 5, | |
| "mmlu_high_school_us_history": 5, | |
| "mmlu_high_school_world_history": 5, | |
| "mmlu_human_aging": 5, | |
| "mmlu_human_sexuality": 5, | |
| "mmlu_humanities": 5, | |
| "mmlu_international_law": 5, | |
| "mmlu_jurisprudence": 5, | |
| "mmlu_logical_fallacies": 5, | |
| "mmlu_machine_learning": 5, | |
| "mmlu_management": 5, | |
| "mmlu_marketing": 5, | |
| "mmlu_medical_genetics": 5, | |
| "mmlu_miscellaneous": 5, | |
| "mmlu_moral_disputes": 5, | |
| "mmlu_moral_scenarios": 5, | |
| "mmlu_nutrition": 5, | |
| "mmlu_other": 5, | |
| "mmlu_philosophy": 5, | |
| "mmlu_prehistory": 5, | |
| "mmlu_professional_accounting": 5, | |
| "mmlu_professional_law": 5, | |
| "mmlu_professional_medicine": 5, | |
| "mmlu_professional_psychology": 5, | |
| "mmlu_public_relations": 5, | |
| "mmlu_security_studies": 5, | |
| "mmlu_social_sciences": 5, | |
| "mmlu_sociology": 5, | |
| "mmlu_stem": 5, | |
| "mmlu_us_foreign_policy": 5, | |
| "mmlu_virology": 5, | |
| "mmlu_world_religions": 5 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711788028.1817935, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| }, | |
| { | |
| "results": { | |
| "truthfulqa": { | |
| "rouge2_diff,none": 8.67940525480841, | |
| "rouge2_diff_stderr,none": 1.4198080901104466, | |
| "rougeL_acc,none": 0.5006119951040392, | |
| "rougeL_acc_stderr,none": 0.01750348793889251, | |
| "bleu_diff,none": 6.113558118723995, | |
| "bleu_diff_stderr,none": 0.9835911873788621, | |
| "bleu_max,none": 31.58553386517857, | |
| "bleu_max_stderr,none": 0.8624877973356571, | |
| "rougeL_diff,none": 8.01633158328263, | |
| "rougeL_diff_stderr,none": 1.3219926011549958, | |
| "rouge1_acc,none": 0.5201958384332925, | |
| "rouge1_acc_stderr,none": 0.017489216849737043, | |
| "rouge1_max,none": 58.02723518350246, | |
| "rouge1_max_stderr,none": 0.888784126120325, | |
| "rouge2_max,none": 44.60344255474271, | |
| "rouge2_max_stderr,none": 1.0820687663065565, | |
| "acc,none": 0.4345242323941256, | |
| "acc_stderr,none": 0.011190008265641689, | |
| "rouge1_diff,none": 8.368475511958406, | |
| "rouge1_diff_stderr,none": 1.3027365232348227, | |
| "rougeL_max,none": 54.8884498682059, | |
| "rougeL_max_stderr,none": 0.9253643022959754, | |
| "bleu_acc,none": 0.5091799265605875, | |
| "bleu_acc_stderr,none": 0.017500550724819743, | |
| "rouge2_acc,none": 0.4638922888616891, | |
| "rouge2_acc_stderr,none": 0.017457800422268615, | |
| "alias": "truthfulqa" | |
| }, | |
| "truthfulqa_gen": { | |
| "bleu_max,none": 31.58553386517857, | |
| "bleu_max_stderr,none": 0.862487797335657, | |
| "bleu_acc,none": 0.5091799265605875, | |
| "bleu_acc_stderr,none": 0.017500550724819743, | |
| "bleu_diff,none": 6.113558118723995, | |
| "bleu_diff_stderr,none": 0.9835911873788621, | |
| "rouge1_max,none": 58.02723518350246, | |
| "rouge1_max_stderr,none": 0.888784126120325, | |
| "rouge1_acc,none": 0.5201958384332925, | |
| "rouge1_acc_stderr,none": 0.01748921684973704, | |
| "rouge1_diff,none": 8.368475511958406, | |
| "rouge1_diff_stderr,none": 1.3027365232348225, | |
| "rouge2_max,none": 44.60344255474271, | |
| "rouge2_max_stderr,none": 1.0820687663065565, | |
| "rouge2_acc,none": 0.4638922888616891, | |
| "rouge2_acc_stderr,none": 0.01745780042226862, | |
| "rouge2_diff,none": 8.67940525480841, | |
| "rouge2_diff_stderr,none": 1.4198080901104464, | |
| "rougeL_max,none": 54.8884498682059, | |
| "rougeL_max_stderr,none": 0.9253643022959754, | |
| "rougeL_acc,none": 0.5006119951040392, | |
| "rougeL_acc_stderr,none": 0.01750348793889251, | |
| "rougeL_diff,none": 8.01633158328263, | |
| "rougeL_diff_stderr,none": 1.3219926011549958, | |
| "alias": " - truthfulqa_gen" | |
| }, | |
| "truthfulqa_mc1": { | |
| "acc,none": 0.35006119951040393, | |
| "acc_stderr,none": 0.016697949420151025, | |
| "alias": " - truthfulqa_mc1" | |
| }, | |
| "truthfulqa_mc2": { | |
| "acc,none": 0.5189872652778472, | |
| "acc_stderr,none": 0.014901128316426086, | |
| "alias": " - truthfulqa_mc2" | |
| } | |
| }, | |
| "groups": { | |
| "truthfulqa": { | |
| "rouge2_diff,none": 8.67940525480841, | |
| "rouge2_diff_stderr,none": 1.4198080901104466, | |
| "rougeL_acc,none": 0.5006119951040392, | |
| "rougeL_acc_stderr,none": 0.01750348793889251, | |
| "bleu_diff,none": 6.113558118723995, | |
| "bleu_diff_stderr,none": 0.9835911873788621, | |
| "bleu_max,none": 31.58553386517857, | |
| "bleu_max_stderr,none": 0.8624877973356571, | |
| "rougeL_diff,none": 8.01633158328263, | |
| "rougeL_diff_stderr,none": 1.3219926011549958, | |
| "rouge1_acc,none": 0.5201958384332925, | |
| "rouge1_acc_stderr,none": 0.017489216849737043, | |
| "rouge1_max,none": 58.02723518350246, | |
| "rouge1_max_stderr,none": 0.888784126120325, | |
| "rouge2_max,none": 44.60344255474271, | |
| "rouge2_max_stderr,none": 1.0820687663065565, | |
| "acc,none": 0.4345242323941256, | |
| "acc_stderr,none": 0.011190008265641689, | |
| "rouge1_diff,none": 8.368475511958406, | |
| "rouge1_diff_stderr,none": 1.3027365232348227, | |
| "rougeL_max,none": 54.8884498682059, | |
| "rougeL_max_stderr,none": 0.9253643022959754, | |
| "bleu_acc,none": 0.5091799265605875, | |
| "bleu_acc_stderr,none": 0.017500550724819743, | |
| "rouge2_acc,none": 0.4638922888616891, | |
| "rouge2_acc_stderr,none": 0.017457800422268615, | |
| "alias": "truthfulqa" | |
| } | |
| }, | |
| "group_subtasks": { | |
| "truthfulqa": [ | |
| "truthfulqa_mc1", | |
| "truthfulqa_gen", | |
| "truthfulqa_mc2" | |
| ] | |
| }, | |
| "configs": { | |
| "truthfulqa_gen": { | |
| "task": "truthfulqa_gen", | |
| "group": [ | |
| "truthfulqa" | |
| ], | |
| "dataset_path": "truthful_qa", | |
| "dataset_name": "generation", | |
| "validation_split": "validation", | |
| "process_docs": "def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:\n return dataset.map(preprocess_function)\n", | |
| "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question}}", | |
| "doc_to_target": " ", | |
| "process_results": "def process_results_gen(doc, results):\n completion = results[0]\n true_refs, false_refs = doc[\"correct_answers\"], doc[\"incorrect_answers\"]\n all_refs = true_refs + false_refs\n\n # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.\n\n # # BLEURT\n # bleurt_scores_true = self.bleurt.compute(\n # predictions=[completion] * len(true_refs), references=true_refs\n # )[\"scores\"]\n # bleurt_scores_false = self.bleurt.compute(\n # predictions=[completion] * len(false_refs), references=false_refs\n # )[\"scores\"]\n # bleurt_correct = max(bleurt_scores_true)\n # bleurt_incorrect = max(bleurt_scores_false)\n # bleurt_max = bleurt_correct\n # bleurt_diff = bleurt_correct - bleurt_incorrect\n # bleurt_acc = int(bleurt_correct > bleurt_incorrect)\n\n # BLEU\n bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]\n bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])\n bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])\n bleu_max = bleu_correct\n bleu_diff = bleu_correct - bleu_incorrect\n bleu_acc = int(bleu_correct > bleu_incorrect)\n\n # ROUGE-N\n rouge_scores = [rouge([ref], [completion]) for ref in all_refs]\n # ROUGE-1\n rouge1_scores = [score[\"rouge1\"] for score in rouge_scores]\n rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])\n rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])\n rouge1_max = rouge1_correct\n rouge1_diff = rouge1_correct - rouge1_incorrect\n rouge1_acc = int(rouge1_correct > rouge1_incorrect)\n # ROUGE-2\n rouge2_scores = [score[\"rouge2\"] for score in rouge_scores]\n rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])\n rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])\n rouge2_max = rouge2_correct\n rouge2_diff = rouge2_correct - rouge2_incorrect\n rouge2_acc = int(rouge2_correct > rouge2_incorrect)\n # ROUGE-L\n rougeL_scores = [score[\"rougeLsum\"] for score in rouge_scores]\n rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])\n rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])\n rougeL_max = rougeL_correct\n rougeL_diff = rougeL_correct - rougeL_incorrect\n rougeL_acc = int(rougeL_correct > rougeL_incorrect)\n\n return {\n # \"bleurt_max\": bleurt_max,\n # \"bleurt_acc\": bleurt_acc,\n # \"bleurt_diff\": bleurt_diff,\n \"bleu_max\": bleu_max,\n \"bleu_acc\": bleu_acc,\n \"bleu_diff\": bleu_diff,\n \"rouge1_max\": rouge1_max,\n \"rouge1_acc\": rouge1_acc,\n \"rouge1_diff\": rouge1_diff,\n \"rouge2_max\": rouge2_max,\n \"rouge2_acc\": rouge2_acc,\n \"rouge2_diff\": rouge2_diff,\n \"rougeL_max\": rougeL_max,\n \"rougeL_acc\": rougeL_acc,\n \"rougeL_diff\": rougeL_diff,\n }\n", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 0, | |
| "metric_list": [ | |
| { | |
| "metric": "bleu_max", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "bleu_acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "bleu_diff", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge1_max", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge1_acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge1_diff", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge2_max", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge2_acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rouge2_diff", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rougeL_max", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rougeL_acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| }, | |
| { | |
| "metric": "rougeL_diff", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "generate_until", | |
| "generation_kwargs": { | |
| "until": [ | |
| "\n\n" | |
| ], | |
| "do_sample": false | |
| }, | |
| "repeats": 1, | |
| "should_decontaminate": true, | |
| "doc_to_decontamination_query": "question", | |
| "metadata": { | |
| "version": 3 | |
| } | |
| }, | |
| "truthfulqa_mc1": { | |
| "task": "truthfulqa_mc1", | |
| "group": [ | |
| "truthfulqa" | |
| ], | |
| "dataset_path": "truthful_qa", | |
| "dataset_name": "multiple_choice", | |
| "validation_split": "validation", | |
| "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}", | |
| "doc_to_target": 0, | |
| "doc_to_choice": "{{mc1_targets.choices}}", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 0, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": true, | |
| "doc_to_decontamination_query": "question", | |
| "metadata": { | |
| "version": 2 | |
| } | |
| }, | |
| "truthfulqa_mc2": { | |
| "task": "truthfulqa_mc2", | |
| "group": [ | |
| "truthfulqa" | |
| ], | |
| "dataset_path": "truthful_qa", | |
| "dataset_name": "multiple_choice", | |
| "validation_split": "validation", | |
| "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}", | |
| "doc_to_target": 0, | |
| "doc_to_choice": "{{mc2_targets.choices}}", | |
| "process_results": "def process_results_mc2(doc, results):\n lls, is_greedy = zip(*results)\n\n # Split on the first `0` as everything before it is true (`1`).\n split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n # Compute the normalized probability mass for the correct answer.\n ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n p_true = p_true / (sum(p_true) + sum(p_false))\n\n return {\"acc\": sum(p_true)}\n", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 0, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": true, | |
| "doc_to_decontamination_query": "question", | |
| "metadata": { | |
| "version": 2 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "truthfulqa_gen": 3, | |
| "truthfulqa_mc1": 2, | |
| "truthfulqa_mc2": 2 | |
| }, | |
| "n-shot": { | |
| "truthfulqa": 0, | |
| "truthfulqa_gen": 0, | |
| "truthfulqa_mc1": 0, | |
| "truthfulqa_mc2": 0 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711780604.5790782, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| }, | |
| { | |
| "results": { | |
| "winogrande": { | |
| "acc,none": 0.7971586424625099, | |
| "acc_stderr,none": 0.011301439925936643, | |
| "alias": "winogrande" | |
| } | |
| }, | |
| "group_subtasks": { | |
| "winogrande": [] | |
| }, | |
| "configs": { | |
| "winogrande": { | |
| "task": "winogrande", | |
| "dataset_path": "winogrande", | |
| "dataset_name": "winogrande_xl", | |
| "training_split": "train", | |
| "validation_split": "validation", | |
| "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n", | |
| "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n", | |
| "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n", | |
| "description": "", | |
| "target_delimiter": " ", | |
| "fewshot_delimiter": "\n\n", | |
| "num_fewshot": 5, | |
| "metric_list": [ | |
| { | |
| "metric": "acc", | |
| "aggregation": "mean", | |
| "higher_is_better": true | |
| } | |
| ], | |
| "output_type": "multiple_choice", | |
| "repeats": 1, | |
| "should_decontaminate": true, | |
| "doc_to_decontamination_query": "sentence", | |
| "metadata": { | |
| "version": 1 | |
| } | |
| } | |
| }, | |
| "versions": { | |
| "winogrande": 1 | |
| }, | |
| "n-shot": { | |
| "winogrande": 5 | |
| }, | |
| "config": { | |
| "model": "hf", | |
| "model_args": "pretrained=cognitivecomputations/dolphin-2.8-mistral-7b-v02,dtype=auto", | |
| "batch_size": "8", | |
| "batch_sizes": [], | |
| "device": "cuda:0", | |
| "use_cache": null, | |
| "limit": null, | |
| "bootstrap_iters": 100000, | |
| "gen_kwargs": null | |
| }, | |
| "git_hash": "ab7cc6b1", | |
| "date": 1711781102.7529685, | |
| "pretty_env_info": "PyTorch version: 2.2.2+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.11.8 (main, Feb 26 2024, 21:39:34) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-101-generic-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L40S\nGPU 1: NVIDIA L40S\nGPU 2: NVIDIA L40S\nGPU 3: NVIDIA L40S\nGPU 4: NVIDIA L40S\nGPU 5: NVIDIA L40S\nGPU 6: NVIDIA L40S\nGPU 7: NVIDIA L40S\nGPU 8: NVIDIA L40S\nGPU 9: NVIDIA L40S\n\nNvidia driver version: 550.54.15\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 57 bits virtual\nByte Order: Little Endian\nCPU(s): 80\nOn-line CPU(s) list: 0-79\nVendor ID: AuthenticAMD\nModel name: AMD EPYC 9254 24-Core Processor\nCPU family: 25\nModel: 17\nThread(s) per core: 2\nCore(s) per socket: 4\nSocket(s): 10\nStepping: 1\nBogoMIPS: 5800.00\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext perfctr_core invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves avx512_bf16 clzero xsaveerptr wbnoinvd arat npt lbrv nrip_save tsc_scale vmcb_clean pausefilter pfthreshold v_vmsave_vmload vgif avx512vbmi umip pku ospke avx512_vbmi2 gfni vaes vpclmulqdq avx512_vnni avx512_bitalg avx512_vpopcntdq la57 rdpid fsrm flush_l1d arch_capabilities\nVirtualization: AMD-V\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 1.3 MiB (40 instances)\nL1i cache: 1.3 MiB (40 instances)\nL2 cache: 40 MiB (40 instances)\nL3 cache: 160 MiB (5 instances)\nNUMA node(s): 2\nNUMA node0 CPU(s): 0-39\nNUMA node1 CPU(s): 40-79\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP always-on, RSB filling, PBRSB-eIBRS Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] torch==2.2.2\n[pip3] torchaudio==2.2.2\n[pip3] torchvision==0.17.2\n[pip3] triton==2.2.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] torch 2.2.2 pypi_0 pypi\n[conda] torchaudio 2.2.2 pypi_0 pypi\n[conda] torchvision 0.17.2 pypi_0 pypi\n[conda] triton 2.2.0 pypi_0 pypi", | |
| "transformers_version": "4.39.2", | |
| "upper_git_hash": null | |
| } | |
| ] | |