# OpenCompass config for BIG-Bench Hard (BBH): builds one generation-style
# dataset entry per BBH subset, split into multiple-choice and free-form tasks.
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import BBHDataset, BBHEvaluator, bbh_mcq_postprocess, BBHEvaluator_mcq

bbh_reader_cfg = dict(input_columns=['input'], output_column='target')

# Subsets whose targets are multiple-choice option letters.
bbh_multiple_choice_sets = [
    'temporal_sequences',
    'disambiguation_qa',
    'date_understanding',
    'tracking_shuffled_objects_three_objects',
    'penguins_in_a_table',
    'geometric_shapes',
    'snarks',
    'ruin_names',
    'tracking_shuffled_objects_seven_objects',
    'tracking_shuffled_objects_five_objects',
    'logical_deduction_three_objects',
    'hyperbaton',
    'logical_deduction_five_objects',
    'logical_deduction_seven_objects',
    'movie_recommendation',
    'salient_translation_error_detection',
    'reasoning_about_colored_objects',
]
# Subsets whose targets are free-form strings.
bbh_free_form_sets = [
    'multistep_arithmetic_two',
    'navigate',
    'dyck_languages',
    'word_sorting',
    'sports_understanding',
    'boolean_expressions',
    'object_counting',
    'formal_fallacies',
    'causal_judgement',
    'web_of_lies',
]

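# Build one dataset config per subset. ZeroRetriever means no in-context
# examples are retrieved; GenInferencer lets the model generate freely, and
# the prompt instructs it to mark its final answer with 'So the answer is',
# which the scoring step keys on.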
bbh_datasets = []
for _name in bbh_multiple_choice_sets:
    bbh_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            # {input} is filled from the reader's 'input' column at run time.
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=(
                        'Follow the given examples and answer the question.'
                        '\n\nQuestion: {input}\n You must give your final '
                        "answer by starting with 'So the answer is' "))
            ])),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=512))
    # Both prediction and reference are normalized with the MCQ post-processor
    # before multiple-choice accuracy is computed.
    bbh_eval_cfg = dict(
        evaluator=dict(type=BBHEvaluator_mcq),
        pred_role='BOT',
        pred_postprocessor=dict(type=bbh_mcq_postprocess),
        dataset_postprocessor=dict(type=bbh_mcq_postprocess))

    bbh_datasets.append(
        dict(
            type=BBHDataset,
            path='opencompass/bbh',
            name=_name,
            abbr='bbh-' + _name,
            reader_cfg=bbh_reader_cfg,
            infer_cfg=bbh_infer_cfg.copy(),
            eval_cfg=bbh_eval_cfg.copy()))

# Free-form subsets share the same prompt and inference settings but are
# scored with the free-form BBHEvaluator instead of the MCQ pipeline.
for _name in bbh_free_form_sets:
    bbh_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(round=[
                dict(
                    role='HUMAN',
                    prompt=(
                        'Follow the given examples and answer the question.'
                        '\n\nQuestion: {input}\n You must give your final '
                        "answer by starting with 'So the answer is' "))
            ])),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=512))
    bbh_eval_cfg = dict(evaluator=dict(type=BBHEvaluator), pred_role='BOT')

    bbh_datasets.append(
        dict(
            type=BBHDataset,
            path='opencompass/bbh',
            name=_name,
            abbr='bbh-' + _name,
            reader_cfg=bbh_reader_cfg,
            infer_cfg=bbh_infer_cfg.copy(),
            eval_cfg=bbh_eval_cfg.copy()))
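
# Usage sketch: in an OpenCompass run config, pull `bbh_datasets` in via
# mmengine's read_base(). The relative import path below is an assumption
# about where this file lives in the checkout; adjust it to the real location.
#
#     from mmengine.config import read_base
#
#     with read_base():
#         from .datasets.bbh.bbh_gen import bbh_datasets
#
#     datasets = [*bbh_datasets]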