
Commit 8127fc3

Authored Jul 23, 2024
CompassBench subjective summarizer added (#1349)
* subjective summarizer added
* fix lint
1 parent a244453 commit 8127fc3

2 files changed: +178 -6 lines changed
 

‎configs/eval_compassbench_v1_3_subjective.py

+9 -6
@@ -4,21 +4,24 @@
     from .datasets.subjective.compassbench.compassbench_checklist import (
         checklist_datasets,
     )
-from opencompass.partitioners import NaivePartitioner, SizePartitioner
+from opencompass.partitioners import NaivePartitioner
 from opencompass.partitioners.sub_naive import SubjectiveNaivePartitioner
 from opencompass.runners import LocalRunner
 from opencompass.tasks import OpenICLInferTask
 from opencompass.tasks.subjective_eval import SubjectiveEvalTask
-# from opencompass.summarizers import SubjectiveSummarizer
+
+from opencompass.summarizers.subjective.compassbench_v13 import CompassBenchSummarizer
 from opencompass.models import HuggingFacewithChatTemplate
 from opencompass.models import TurboMindModelwithChatTemplate
+
 api_meta_template = dict(
     round=[
         dict(role='HUMAN', api_role='HUMAN'),
         dict(role='BOT', api_role='BOT', generate=True),
     ]
 )
 models = [
+    # Choose different engines to start the job
     # dict(
     #     type=HuggingFacewithChatTemplate,
     #     abbr="internlm2-chat-1.8b",
@@ -46,17 +49,18 @@
         batch_size=16,
         run_cfg=dict(num_gpus=1),
     ),
+    # Mock as gpt4o
     dict(
         type=TurboMindModelwithChatTemplate,
-        abbr='judgellm',
+        abbr='gpt4o',
         path='internlm/internlm2-chat-1_8b',
         engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
         gen_config=dict(top_k=1000, temperature=1, top_p=0.9, max_new_tokens=2048),
         max_seq_len=7168,
         max_out_len=2048,
         batch_size=16,
         run_cfg=dict(num_gpus=1),
-    )
+    ),
 ]
 # -------------Inference Stage ----------------------------------------
 # For subjective evaluation, we often set do sample for models
@@ -79,6 +83,5 @@
         type=LocalRunner, max_num_workers=16, task=dict(type=SubjectiveEvalTask)
     ),
 )
-# TODO summarizer to be implemented
-# summarizer = dict(type=SubjectiveSummarizer, function='subjective')
+summarizer = dict(type=CompassBenchSummarizer)
 work_dir = 'outputs/debug_checklist/'
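
In the committed workflow, OpenCompass's launcher builds the summarizer from the `summarizer = dict(type=CompassBenchSummarizer)` entry and runs it after the subjective evaluation stage finishes. The sketch below is not part of the commit: it shows how the summarizer could also be driven by hand over an already-finished run, assuming the config loads via mmengine's `Config.fromfile`, that the checklist dataset configs carry the `base_models` field the summarizer reads, and that the timestamp points at an existing run under `work_dir`.

# Hedged sketch, not part of the commit: re-summarize an existing run by hand.
from mmengine.config import Config

from opencompass.summarizers.subjective.compassbench_v13 import CompassBenchSummarizer

# Assumption: this config defines everything the summarizer reads
# (datasets with base_models, eval.partitioner.models, judge_models, work_dir).
cfg = Config.fromfile('configs/eval_compassbench_v1_3_subjective.py')

summarizer = CompassBenchSummarizer(cfg, check_pos_bias=False)
# The timestamp is a placeholder; it must name an existing timestamped
# results directory under work_dir produced by a previous evaluation run.
summarizer.summarize(time_str='20240723_000000')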
opencompass/summarizers/subjective/compassbench_v13.py (new file)

+169 -0
@@ -0,0 +1,169 @@
+# flake8: noqa
+# yapf: disable
+import csv
+import os
+import os.path as osp
+import re
+from collections import defaultdict
+from datetime import datetime
+from itertools import product
+
+import numpy as np
+from mmengine import ConfigDict
+from tabulate import tabulate
+
+from opencompass.partitioners.sub_naive import remove_duplicate_pairs
+from opencompass.utils import dataset_abbr_from_cfg, model_abbr_from_cfg
+
+from .compass_arena import (check_position_bias,
+                            model_abbr_from_cfg_used_in_summarizer)
+from .utils import get_judgeanswer_and_reference, get_outdir
+
+
+def post_process_wildbench_pair(judgement: str):
+    pattern = r'\"choice\": \"(.*?)\"'
+    matched_result = re.findall(pattern, judgement)
+    if matched_result:
+        return matched_result[0]
+    else:
+        return None
+
+
+class CompassBenchSummarizer:
+    """Do the subjectivity analyze based on evaluation results.
+
+    Args:
+        config (ConfigDict): The configuration object of the evaluation task.
+            It's expected to be filled out at runtime.
+    """
+
+    def __init__(self, config: ConfigDict, check_pos_bias=False) -> None:
+        self.tasks = []
+        self.cfg = config
+        self.base_models = self.cfg['datasets'][0]['base_models']
+        self.compare_models = self.cfg['eval']['partitioner']['models']
+        self.judge_models = self.cfg.get('judge_models', None)
+        self.meta_judge_model = self.cfg.eval.partitioner.get('meta_judge_model', None)
+        self.judge_abbr = model_abbr_from_cfg(self.cfg['judge_models'][0])
+        self.judge_function = post_process_wildbench_pair
+        self.check_pos_bias = check_pos_bias
+
+    def get_score(self, time_str):
+        output_dir, results_folder = get_outdir(self.cfg, time_str)
+        model_combinations = list(product(self.base_models, self.compare_models))
+        unique_combinations = remove_duplicate_pairs([combo for combo in model_combinations if combo[0] != combo[1]])
+
+        if self.meta_judge_model is not None:
+            self.judge_models.append(self.meta_judge_model)
+
+        scores = {}
+        for idx, judge_model_cfg in enumerate(self.judge_models):
+            judge_model = model_abbr_from_cfg(judge_model_cfg)
+            for dataset in self.cfg['datasets']:
+                dataset_abbr = dataset_abbr_from_cfg(dataset)
+                for model_pair in unique_combinations:
+                    base_model = model_pair[0]['abbr']
+                    compare_model = model_pair[1]['abbr']
+                    if idx == len(self.judge_models):
+                        subdir = base_model + '_' + compare_model + '_summarized-by--' + judge_model
+                    else:
+                        subdir = base_model + '_' + compare_model + '_judged-by--' + judge_model
+                    subdir_path = os.path.join(results_folder, subdir)
+                    if not os.path.isdir(subdir_path):
+                        print(subdir_path + ' is not exist! please check!')
+                        continue
+                    judged_answers, references = get_judgeanswer_and_reference(dataset, subdir_path, self.judge_function)
+                    if self.check_pos_bias:
+                        bias_num = check_position_bias(judged_answers, references)
+                    else:
+                        bias_num = 0
+                    win_base_model = defaultdict(float)
+                    win_compare_model = defaultdict(float)
+                    categories = defaultdict(float)
+                    score_mapping = {'A++': 1, 'A+': 0.5, 'A=B': 0, 'B+': -0.5, 'B++': -1}
+                    for prediction, reference in zip(judged_answers, references):
+                        if prediction not in score_mapping:
+                            continue
+
+                        categories[dataset_abbr] += 1
+                        flag = 1 if reference['answer1'] == base_model else -1
+                        score_1 = score_mapping[prediction]*flag
+                        score_2 = -score_1
+                        win_compare_model[dataset_abbr] += score_2
+                        win_base_model[dataset_abbr] += score_1
+
+                    for capability in categories:
+                        win_base_model[capability] = win_base_model[capability] / categories[capability] * 100
+                        win_base_model[capability] = round(win_base_model[capability], 2)
+                        win_compare_model[capability] = win_compare_model[capability] / categories[capability] * 100
+                        win_compare_model[capability] = round(win_compare_model[capability], 2)
+
+                    win_base_model['position_bias'] = bias_num
+                    win_compare_model['position_bias'] = bias_num
+
+                    if judge_model not in scores:
+                        scores[judge_model] = {}
+                    if dataset_abbr not in scores[judge_model]:
+                        scores[judge_model][dataset_abbr] = {}
+                    scores[judge_model][dataset_abbr][base_model + '/' + compare_model] = win_compare_model
+
+        return scores
+
+    def summarize(
+        self,
+        time_str: str = datetime.now().strftime('%Y%m%d_%H%M%S'),
+    ):
+        """Summarize the subjectivity analysis based on evaluation results.
+
+        Args:
+            time_str (str): Timestamp for file naming.
+
+        Returns:
+            pd.DataFrame: The summary results.
+        """
+        scores = self.get_score(time_str)
+        output_dir, results_folder = get_outdir(self.cfg, time_str)
+        for idx, judge_model in enumerate(self.judge_models):
+            judge_abbr = model_abbr_from_cfg(judge_model)
+            table = []
+            for dataset in self.cfg['datasets']:
+                dataset_abbr = dataset_abbr_from_cfg(dataset)
+                summarizer_model_abbrs = [model_abbr_from_cfg_used_in_summarizer(i) for i in self.compare_models]
+                one_column = list(scores[judge_abbr][dataset_abbr].values())[0]
+                row_headers = [i for i in one_column.keys() if i not in [dataset_abbr, 'position_bias']]
+                # row_headers = [dataset_abbr, 'position_bias'] + row_headers
+                row_headers = [dataset_abbr] + row_headers
+                for row_header in row_headers:
+                    row = [row_header]
+                    headers = ['']
+                    for model_cfg in self.compare_models:
+                        model_abbr = model_abbr_from_cfg(model_cfg)
+                        avg = 0
+                        for base_model_cfg in self.base_models:
+                            base_model_abbr = model_abbr_from_cfg(base_model_cfg)
+                            base_compare = base_model_abbr + '/' + model_abbr
+                            headers.append(base_compare)
+                            s = scores[judge_abbr][dataset_abbr][base_compare].get(row_header, '')
+                            if isinstance(s, float):
+                                avg += s
+                                s = f'{s:.2f}'
+                            if isinstance(s, int):
+                                s = str(s)
+                            row.append(s)
+                        # avg = avg/len(self.base_models)
+                        # row.append(f'{avg:.2f}')
+                        # headers.append('Avg')
+                    table.append(row)
+            txt = tabulate(table, headers=headers)
+            print(txt)
+
+            if idx == len(self.judge_models):
+                output_filename = osp.join(output_dir, 'summarized-by--' + judge_abbr + '-' + '-report.csv')
+            else:
+                output_filename = osp.join(output_dir, 'judged-by--' + judge_abbr + '-' + '-report.csv')
+            os.makedirs(osp.dirname(output_filename), exist_ok=True)
+            with open(output_filename, 'w') as f:
+                f.write(','.join(headers) + '\n')
+                for line in table:
+                    f.write(','.join(line) + '\n')
+            print(output_filename)
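
The scoring logic in get_score is compact enough to check by hand: the judge's raw verdict is parsed for its "choice" field, mapped onto [-1, 1] via the A++/A+/A=B/B+/B++ scale, sign-flipped when the base model was shown as answer2, then averaged per dataset and scaled to a percentage. The toy walk-through below is not part of the commit; the sample judgement strings and the answer1_is_base flags are made up for illustration.

# Toy walk-through of the scoring above (illustrative, not from the commit).
import re

def post_process_wildbench_pair(judgement: str):
    matched = re.findall(r'\"choice\": \"(.*?)\"', judgement)
    return matched[0] if matched else None

score_mapping = {'A++': 1, 'A+': 0.5, 'A=B': 0, 'B+': -0.5, 'B++': -1}

# Hypothetical judge outputs, plus whether the base model was shown as answer1.
judgements = ['{"choice": "A+"}', '{"choice": "B++"}', '{"choice": "A=B"}']
answer1_is_base = [True, False, True]

total, count = 0.0, 0
for raw, base_first in zip(judgements, answer1_is_base):
    choice = post_process_wildbench_pair(raw)
    if choice not in score_mapping:
        continue
    flag = 1 if base_first else -1   # mirrors reference['answer1'] == base_model
    total += score_mapping[choice] * flag
    count += 1

# (0.5 + 1.0 + 0.0) / 3 * 100 = 50.0 for the base model; the compare model gets -50.0.
print(round(total / count * 100, 2))

These per-dataset percentages, with a position_bias count attached, are what summarize() prints as a table and writes into the judged-by-- / summarized-by-- CSV reports.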
