|
| 1 | +from opencompass.openicl.icl_prompt_template import PromptTemplate |
| 2 | +from opencompass.openicl.icl_retriever import ZeroRetriever |
| 3 | +from opencompass.openicl.icl_inferencer import GenInferencer |
| 4 | +from opencompass.openicl.icl_evaluator import CircularEvaluator, AccEvaluator |
| 5 | +from opencompass.datasets.compassbench_obj import CompassBenchObjectiveV1_3, compassbench_objective_v1_3_postprocess |
| 6 | +from opencompass.utils.text_postprocessors import first_option_postprocess |
| 7 | + |
| 8 | + |
# Chain-of-thought prompt templates, keyed by question-type tag.
# `{question}` is filled in by the prompt template at inference time.
# NOTE: these strings are part of the benchmark contract — do not reword them.
prompt_cn = {
    # Chinese single-choice: model must reason step by step and end with
    # “答案选项为X” (X in ABCD) so the option postprocessor can extract it.
    'single_choice_cn': '以下是一道单项选择题,请你根据你了解的知识给出正确的答案选项。请你一步步推理并在最后用“答案选项为X”来回答,其中X是ABCD中你认为正确的选项序号\n下面是你要回答的题目:\n{question}\n让我们一步步解决这个问题:',
    # Chinese cloze: final answer must be placed inside \boxed{} for extraction.
    'cloze_cn': '以下是一道填空题,请你根据你了解的知识一步步思考后把你的最终答案放到\\boxed{}中。\n下面是你要回答的题目:\n{question}\n让我们一步步解决这个问题:',
}

prompt_en = {
    # English single-choice: answer must end with 'The answer is X'.
    'single_choice_en': "Here is a single-choice question. Please give the correct answer based on your knowledge. Please reason step by step and answer with 'The answer is X' at the end, where X is the option number you think is correct.\nHere is the question you need to answer:\n{question}\nLet's solve this problem step by step:",
    # English cloze: final answer goes inside \boxed{}.
    'cloze_en': "Here is a fill-in-the-blank question. Please think step by step based on your knowledge and put your final answer in \\boxed{}. Here is the question you need to answer:\n{question}\nLet's solve this problem step by step:",
}
| 18 | + |
| 19 | + |
# Dataset splits to build, mapping split name -> list of question-type tags.
# Tags select the prompt (prompt_cn/prompt_en above) and the eval config below;
# only Chinese single-choice subsets are enabled here.
douknow_sets = {
    'knowledge': ['single_choice_cn'],
    'math': ['single_choice_cn'],
}

# Set up the prompts.
# When True, single-choice subsets use CircularEvaluator (answer options are
# rotated to reduce position bias); otherwise plain accuracy is used.
CircularEval = True
| 27 | + |
| 28 | + |
# Build one dataset config per (split, question-type) pair declared in
# douknow_sets. Each entry wires together the prompt template, a zero-shot
# retriever, a generative inferencer, and the matching evaluator/postprocessor.
compassbench_aug_datasets = []

for _split in list(douknow_sets.keys()):
    for _name in douknow_sets[_split]:
        # Select the prompt set by locale encoded in the tag ('..._cn' / '..._en').
        if 'cn' in _name:
            single_choice_prompts = prompt_cn
            cloze_prompts = prompt_cn
        else:
            single_choice_prompts = prompt_en
            cloze_prompts = prompt_en
        douknow_infer_cfg = dict(
            ice_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin='</E>',
                    # Single-choice: HUMAN question + BOT '{answer}' round (the
                    # BOT turn is only used for in-context examples).
                    # FIX: the cloze branch previously assigned the bare prompt
                    # string to `round`; wrap it in a HUMAN round dict like the
                    # single-choice branch so the template shape is consistent.
                    round=[
                        dict(
                            role='HUMAN',
                            prompt=single_choice_prompts[_name],
                        ),
                        dict(role='BOT', prompt='{answer}'),
                    ] if 'choice' in _name else [
                        dict(
                            role='HUMAN',
                            prompt=cloze_prompts[_name],
                        ),
                    ],
                ),
                ice_token='</E>',
            ),
            retriever=dict(type=ZeroRetriever),
            inferencer=dict(type=GenInferencer),
        )
        douknow_eval_cfg = dict(
            # Single-choice subsets optionally use circular evaluation; cloze
            # subsets always use plain accuracy.
            evaluator=dict(type=CircularEvaluator if CircularEval else AccEvaluator)
            if 'single_choice' in _name else dict(type=AccEvaluator),
            # Single-choice answers are extracted as an ABCD option; cloze
            # answers go through the dataset-specific postprocessor.
            pred_postprocessor=dict(type=first_option_postprocess, options='ABCD')
            if 'single_choice' in _name
            else dict(type=compassbench_objective_v1_3_postprocess, name=_name),
        )

        compassbench_aug_datasets.append(
            dict(
                type=CompassBenchObjectiveV1_3,
                path=f'./data/compassbench_v1_3/{_split}/{_name}.jsonl',
                name='circular_' + _name if CircularEval else _name,
                # FIX: parenthesize the conditional. Previously the ternary
                # bound the entire concatenation, so abbr collapsed to '' when
                # CircularEval was False. Output is unchanged for the current
                # CircularEval = True setting.
                abbr='compassbench-' + _split + '-' + _name
                + ('circular' if CircularEval else ''),
                reader_cfg=dict(
                    input_columns=['question'],
                    output_column='answer',
                ),
                infer_cfg=douknow_infer_cfg,
                eval_cfg=douknow_eval_cfg,
            ))

# Avoid leaking loop variables into the config namespace.
del _split, _name