
Commit 1d3a26c: [Doc] quick start swap tabs (#1263)

Authored Jul 5, 2024 · 1 parent 68ca484

Commit message: [doc] quick start swap tabs; update docs; update (×7)

23 files changed: +515 -191 lines
New file (+8 lines), a demo CMMLU config (PPL-style, for base models):

```python
from mmengine import read_base

with read_base():
    from ..cmmlu.cmmlu_ppl_041cbf import cmmlu_datasets

for d in cmmlu_datasets:
    d['abbr'] = 'demo_' + d['abbr']
    d['reader_cfg']['test_range'] = '[0:4]'
```
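The `test_range` string is what keeps these demo configs small: it restricts each dataset to a slice of its test split. A minimal sketch of the intended semantics, assuming OpenCompass applies the string as a Python-style slice over the test examples (the stand-in data below is hypothetical):

```python
# Hypothetical 100-example test split; the demo configs above slice it down.
test_split = [f'example_{i}' for i in range(100)]

test_range = '[0:4]'                      # value set in the demo configs
subset = eval('test_split' + test_range)  # assumes the slice string is eval'd
print(subset)  # ['example_0', 'example_1', 'example_2', 'example_3']
```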
New file (+8 lines), the generation-style CMMLU demo config (for chat models):

```python
from mmengine import read_base

with read_base():
    from ..cmmlu.cmmlu_gen_c13365 import cmmlu_datasets

for d in cmmlu_datasets:
    d['abbr'] = 'demo_' + d['abbr']
    d['reader_cfg']['test_range'] = '[0:4]'
```
New file (+7 lines), a demo GSM8K config for base models:

```python
from mmengine import read_base

with read_base():
    from ..gsm8k.gsm8k_gen_17d0dc import gsm8k_datasets

gsm8k_datasets[0]['abbr'] = 'demo_' + gsm8k_datasets[0]['abbr']
gsm8k_datasets[0]['reader_cfg']['test_range'] = '[0:64]'
```
New file (+7 lines), a demo GSM8K config for chat models:

```python
from mmengine import read_base

with read_base():
    from ..gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets

gsm8k_datasets[0]['abbr'] = 'demo_' + gsm8k_datasets[0]['abbr']
gsm8k_datasets[0]['reader_cfg']['test_range'] = '[0:64]'
```
New file (+7 lines), a demo MATH config for base models (4-shot):

```python
from mmengine import read_base

with read_base():
    from ..math.math_4shot_base_gen_db136b import math_datasets

math_datasets[0]['abbr'] = 'demo_' + math_datasets[0]['abbr']
math_datasets[0]['reader_cfg']['test_range'] = '[0:64]'
```
New file (+7 lines), a demo MATH config for chat models (0-shot):

```python
from mmengine import read_base

with read_base():
    from ..math.math_0shot_gen_393424 import math_datasets

math_datasets[0]['abbr'] = 'demo_' + math_datasets[0]['abbr']
math_datasets[0]['reader_cfg']['test_range'] = '[0:64]'
```

configs/eval_base_demo.py (new file, +10 lines)

```python
from mmengine.config import read_base

with read_base():
    from .datasets.demo.demo_gsm8k_base_gen import gsm8k_datasets
    from .datasets.demo.demo_math_base_gen import math_datasets
    from .models.qwen.hf_qwen2_1_5b import models as hf_qwen2_1_5b_models
    from .models.hf_internlm.hf_internlm2_1_8b import models as hf_internlm2_1_8b_models

datasets = gsm8k_datasets + math_datasets
models = hf_qwen2_1_5b_models + hf_internlm2_1_8b_models
```

configs/eval_chat_demo.py (new file, +10 lines)

```python
from mmengine.config import read_base

with read_base():
    from .datasets.demo.demo_gsm8k_chat_gen import gsm8k_datasets
    from .datasets.demo.demo_math_chat_gen import math_datasets
    from .models.qwen.hf_qwen2_1_5b_instruct import models as hf_qwen2_1_5b_instruct_models
    from .models.hf_internlm.hf_internlm2_chat_1_8b import models as hf_internlm2_chat_1_8b_models

datasets = gsm8k_datasets + math_datasets
models = hf_qwen2_1_5b_instruct_models + hf_internlm2_chat_1_8b_models
```
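A plausible way to launch either demo end-to-end, following the `run.py` usage shown in the quick start docs below (run from the repository root; `--debug` runs tasks sequentially with verbose output):

```bash
python run.py configs/eval_chat_demo.py --debug
```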

configs/eval_demo.py (-10 lines)

This file was deleted; it is superseded by the `eval_base_demo.py` and `eval_chat_demo.py` configs above.

The same one-line change, removing the explicit `stop_words` list, is applied to seven InternLM2 chat model configs under configs/models/hf_internlm/: hf_internlm2_5_7b_chat.py, hf_internlm2_chat_1_8b.py, hf_internlm2_chat_1_8b_sft.py, hf_internlm2_chat_20b.py, hf_internlm2_chat_20b_sft.py, hf_internlm2_chat_7b.py, and hf_internlm2_chat_7b_sft.py (-1 line each):

```diff
@@ -8,6 +8,5 @@
         max_out_len=1024,
         batch_size=8,
         run_cfg=dict(num_gpus=1),
-        stop_words=['</s>', '<|im_end|>'],
     )
 ]
```

(The 20B variants use `run_cfg=dict(num_gpus=2)`; the diff is otherwise identical.)

configs/models/hf_internlm/hf_internlm2_chat_math_20b_with_system.py (+4 -4)

```diff
@@ -3,9 +3,9 @@
 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
-        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
-        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
 )
@@ -30,6 +30,6 @@
         batch_size=8,
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=2, num_procs=1),
-        end_str='[UNUSED_TOKEN_145]',
+        end_str='<|im_end|>',
     )
 ]
```

configs/models/hf_internlm/hf_internlm2_chat_math_7b_with_system.py (+4 -4)

```diff
@@ -3,9 +3,9 @@
 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
-        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
-        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
 )
@@ -30,6 +30,6 @@
         batch_size=8,
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
-        end_str='[UNUSED_TOKEN_145]',
+        end_str='<|im_end|>',
     )
 ]
```
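Both diffs swap InternLM2's internal placeholder tokens for the public `<|im_start|>` / `<|im_end|>` chat markers. To make the structure concrete, here is a minimal, hypothetical sketch of how a `round`-style meta template could turn a conversation into a prompt string; the `render` helper is illustrative only, not OpenCompass's actual implementation:

```python
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
)


def render(messages):
    """Illustrative only: wrap each message in its role's begin/end strings.

    A role marked generate=True contributes only its `begin`, leaving the
    assistant turn open for the model to complete (cf. end_str='<|im_end|>').
    """
    spec = {r['role']: r for r in _meta_template['round']}
    parts = []
    for role, text in messages:
        r = spec[role]
        if r.get('generate'):
            parts.append(r['begin'])
        else:
            parts.append(r['begin'] + text + r['end'])
    return ''.join(parts)


print(render([('SYSTEM', 'You are a math assistant.'),
              ('HUMAN', 'What is 2 + 2?'),
              ('BOT', '')]))
```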

docs/en/get_started/quick_start.md (+36 -38)

The "Command Line (Custom HF Model)" tab is moved to the front of the tab group; its content is otherwise unchanged.

``````diff
@@ -22,7 +22,43 @@ For larger parameterized models like Llama-7B, refer to other examples provided
 In OpenCompass, each evaluation task consists of the model to be evaluated and the dataset. The entry point for evaluation is `run.py`. Users can select the model and dataset to be tested either via command line or configuration files.
 
 `````{tabs}
+````{tab} Command Line (Custom HF Model)
+
+For HuggingFace models, users can set model parameters directly through the command line without additional configuration files. For instance, for the `facebook/opt-125m` model, you can evaluate it with the following command:
+
+```bash
+python run.py --datasets siqa_gen winograd_ppl \
+--hf-type base \
+--hf-path facebook/opt-125m
+```
+
+Note that in this way, OpenCompass only evaluates one model at a time, while other ways can evaluate multiple models at once.
+
+```{caution}
+`--hf-num-gpus` does not stand for the actual number of GPUs to use in evaluation, but the minimum required number of GPUs for this model. [More](faq.md#how-does-opencompass-allocate-gpus)
+```
+
+:::{dropdown} More detailed example
+:animate: fade-in-slide-down
+```bash
+python run.py --datasets siqa_gen winograd_ppl \
+--hf-type base \  # HuggingFace model type, base or chat
+--hf-path facebook/opt-125m \  # HuggingFace model path
+--tokenizer-path facebook/opt-125m \  # HuggingFace tokenizer path (if the same as the model path, can be omitted)
+--tokenizer-kwargs padding_side='left' truncation='left' trust_remote_code=True \  # Arguments to construct the tokenizer
+--model-kwargs device_map='auto' \  # Arguments to construct the model
+--max-seq-len 2048 \  # Maximum sequence length the model can accept
+--max-out-len 100 \  # Maximum number of tokens to generate
+--min-out-len 100 \  # Minimum number of tokens to generate
+--batch-size 64 \  # Batch size
+--hf-num-gpus 1  # Number of GPUs required to run the model
+```
+```{seealso}
+For all HuggingFace related parameters supported by `run.py`, please read [Launching Evaluation Task](../user_guides/experimentation.md#launching-an-evaluation-task).
+```
+:::
+
+````
 ````{tab} Command Line
 
 Users can combine the models and datasets they want to test using `--models` and `--datasets`.
``````

The same tab is removed, verbatim, from its old position between the "Command Line" and "Configuration File" tabs:

``````diff
@@ -74,44 +110,6 @@ If you want to evaluate other models, please check out the "Command Line (Custom
 
 ````
-````{tab} Command Line (Custom HF Model)
-
-(38 lines identical to the block added above)
-
-````
 ````{tab} Configuration File
 
 In addition to configuring the experiment through the command line, OpenCompass also allows users to write the full configuration of the experiment in a configuration file and run it directly through `run.py`. The configuration file is organized in Python format and must include the `datasets` and `models` fields.
``````
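The moved tab's examples use a base model; for a chat-type HF model the same command-line path applies. A plausible variant (the dataset abbreviation and model path here are examples, not part of this diff):

```bash
python run.py --datasets gsm8k_gen \
--hf-type chat \
--hf-path internlm/internlm2-chat-1_8b
```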
docs/zh_cn/get_started/extra-installation.md (new file, +174 lines)

# Extra Installation Notes

Feel free to use your browser's built-in search on this page.

## Inference Backends

- LMDeploy

```bash
pip install lmdeploy
```

- VLLM

```bash
pip install vllm
```

The OpenCompass developers work with CUDA 11.8. The following pinned dependency set covers the vast majority of models released before 2024.07:

```bash
export VLLM_VERSION=0.4.3
export LMDEPLOY_VERSION=0.4.1
export FLASH_ATTN_VERSION=2.5.7
export XFORMERS_VERSION=0.0.25.post1
export TORCH_VERSION=2.2.2
export TORCHVISION_VERSION=0.17.2
export TORCHAUDIO_VERSION=2.2.2
export TRITON_VERSION=2.1.0
export PYTHON_VERSION=310


pip3 install "https://github.com/InternLM/lmdeploy/releases/download/v${LMDEPLOY_VERSION}/lmdeploy-${LMDEPLOY_VERSION}+cu118-cp${PYTHON_VERSION}-cp${PYTHON_VERSION}-manylinux2014_x86_64.whl" --extra-index-url https://download.pytorch.org/whl/cu118
pip3 install "https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cu118-cp${PYTHON_VERSION}-cp${PYTHON_VERSION}-manylinux1_x86_64.whl" --extra-index-url https://download.pytorch.org/whl/cu118
pip3 install "https://github.com/Dao-AILab/flash-attention/releases/download/v${FLASH_ATTN_VERSION}/flash_attn-${FLASH_ATTN_VERSION}+cu118torch2.2cxx11abiFALSE-cp${PYTHON_VERSION}-cp${PYTHON_VERSION}-linux_x86_64.whl" --extra-index-url https://download.pytorch.org/whl/cu118
pip3 install xformers==${XFORMERS_VERSION} --extra-index-url https://download.pytorch.org/whl/cu118
pip3 install torch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION} torchaudio==${TORCHAUDIO_VERSION} --index-url https://download.pytorch.org/whl/cu118
pip3 install triton==${TRITON_VERSION} --extra-index-url https://download.pytorch.org/whl/cu118
```

Note that each successive `pip install` in this sequence may override the versions of some dependencies installed by an earlier command, and after everything is installed some declared dependency constraints may be left unsatisfied. Even so, the models that need lmdeploy / vllm / xformers will run. Somewhat mysterious.
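To see exactly which declared constraints are left unsatisfied after such a sequence, `pip check` reports the conflicts without changing anything:

```bash
pip check  # lists installed packages whose declared dependencies conflict
```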
## Models

- LLAMA (weights, native, non-HF format)

```bash
git clone https://github.com/facebookresearch/llama.git
cd llama
pip install -r requirements.txt
pip install -e .
```

- Vicuna (weights)

```bash
pip install "fschat[model_worker,webui]"
```

- Baichuan / Baichuan2 (weights)

```bash
pip install "transformers<=4.33.3"
```

- ChatGLM-3 / GLM-4 (weights)

```bash
pip install "transformers<=4.41.2"
```

- GPT-3.5-Turbo / GPT-4-Turbo / GPT-4 / GPT-4o (API)

```bash
pip install openai
```

- Claude (API)

```bash
pip install anthropic
```

- ByteDance Doubao (API)

```bash
pip install volcengine-python-sdk
```

- Tencent Hunyuan (API)

```bash
pip install tencentcloud-sdk-python
```

- iFlytek Spark (API)

```bash
pip install spark_ai_python "sseclient-py==1.7.2" websocket-client
```

- Zhipu AI (API)

```bash
pip install zhipuai
```

- Tongyi Qianwen (API)

```bash
pip install dashscope
```

## Datasets

- HumanEval

```bash
git clone git@github.com:open-compass/human-eval.git
cd human-eval && pip install -e .
```

This repository is a fork of https://github.com/openai/human-eval.git in which the notice at **lines 48-57** of `human_eval/execution.py` has already been commented out. That notice warns of the risks of directly executing LLM-generated code.

- HumanEvalX / HumanEval+ / MBPP+

```bash
git clone --recurse-submodules git@github.com:open-compass/human-eval.git
cd human-eval
pip install -e .
pip install -e evalplus
```

- AlpacaEval

```bash
pip install alpaca-eval==0.6 scikit-learn==1.5
```

- CIBench

```bash
pip install -r requirements/agent.txt
```

- T-Eval

```bash
pip install lagent==0.1.2
```

- APPS / TACO

```bash
pip install pyext
```

- IFEval

```bash
pip install langdetect
```

- NPHardEval

```bash
pip install networkx
```

- LawBench

```bash
pip install cn2an
```

docs/zh_cn/get_started/installation.md (+2 -51)

````diff
@@ -35,58 +35,9 @@
 pip install -e .
 ```
 
-3. Install humaneval (optional):
+3. If you need an inference backend, API model testing, or evaluation on code / agent / subjective datasets, see [Extra Installation Notes](./extra-installation.md).
 
-   Run this step only if you need to **evaluate model coding ability on the humaneval dataset**; otherwise skip it.
-
-   <details>
-   <summary><b>Click for details</b></summary>
-
-   ```bash
-   git clone https://github.com/openai/human-eval.git
-   cd human-eval
-   pip install -r requirements.txt
-   pip install -e .
-   cd ..
-   ```
-
-   Read the comments in **lines 48-57** of `human_eval/execution.py` carefully to understand the risks of executing model-generated code; if you accept those risks, uncomment **line 58** to enable code-execution evaluation.
-
-   </details>
-
-4. Install Llama (optional):
-
-   Run this step only if you need to **evaluate Llama / Llama-2 / Llama-2-chat models with the official implementation**; otherwise skip it.
-
-   <details>
-   <summary><b>Click for details</b></summary>
-
-   ```bash
-   git clone https://github.com/facebookresearch/llama.git
-   cd llama
-   pip install -r requirements.txt
-   pip install -e .
-   cd ..
-   ```
-
-   Example configuration files for all Llama / Llama-2 / Llama-2-chat models can be found under `configs/models`. ([example](https://github.com/open-compass/opencompass/blob/eb4822a94d624a4e16db03adeb7a59bbd10c2012/configs/models/llama2_7b_chat.py))
-
-   </details>
-
-5. Install alpaca-eval (optional):
-
-   Run this step only if you need to **evaluate the alpaca-eval dataset with the official alpaca-eval implementation**; otherwise skip it.
-
-   <details>
-   <summary><b>Click for details</b></summary>
-
-   ```bash
-   pip install alpaca-eval
-   ```
-
-   </details>
-
-# Dataset Preparation
+## Dataset Preparation
 
 The datasets supported by OpenCompass mainly consist of two parts:
````
docs/zh_cn/get_started/quick_start.md (+215 -73)

Large diff; not rendered by default.

opencompass/utils/run.py (+16 -4)

```diff
@@ -18,17 +18,25 @@
 from opencompass.utils import get_logger, match_files
 
 
-def match_cfg_file(workdir: str,
+def match_cfg_file(workdir: Union[str, List[str]],
                    pattern: Union[str, List[str]]) -> List[Tuple[str, str]]:
     """Match the config file in workdir recursively given the pattern.
 
     Additionally, if the pattern itself points to an existing file, it will be
     directly returned.
     """
+    def _mf_with_multi_workdirs(workdir, pattern, fuzzy=False):
+        if isinstance(workdir, str):
+            workdir = [workdir]
+        files = []
+        for wd in workdir:
+            files += match_files(wd, pattern, fuzzy=fuzzy)
+        return files
+
     if isinstance(pattern, str):
         pattern = [pattern]
     pattern = [p + '.py' if not p.endswith('.py') else p for p in pattern]
-    files = match_files(workdir, pattern, fuzzy=False)
+    files = _mf_with_multi_workdirs(workdir, pattern, fuzzy=False)
     if len(files) != len(pattern):
         nomatched = []
         ambiguous = []
@@ -37,7 +45,7 @@ def match_cfg_file(workdir: str,
                    'You may use tools/list_configs.py to list or '
                    'locate the configurations.\n')
         for p in pattern:
-            files = match_files(workdir, p, fuzzy=False)
+            files = _mf_with_multi_workdirs(workdir, p, fuzzy=False)
             if len(files) == 0:
                 nomatched.append([p[:-3]])
             elif len(files) > 1:
@@ -101,7 +109,10 @@ def get_config_from_arg(args) -> Config:
         raise ValueError('You must specify "--datasets" or "--custom-dataset-path" if you do not specify a config file path.')
     datasets = []
     if args.datasets:
-        datasets_dir = os.path.join(args.config_dir, 'datasets')
+        datasets_dir = [
+            os.path.join(args.config_dir, 'datasets'),
+            os.path.join(args.config_dir, 'dataset_collections')
+        ]
         for dataset_arg in args.datasets:
             if '/' in dataset_arg:
                 dataset_name, dataset_suffix = dataset_arg.split('/', 1)
@@ -150,6 +161,7 @@ def get_config_from_arg(args) -> Config:
             model_kwargs=args.model_kwargs,
             tokenizer_path=args.tokenizer_path,
             tokenizer_kwargs=args.tokenizer_kwargs,
+            generation_kwargs=args.generation_kwargs,
             peft_path=args.peft_path,
             peft_kwargs=args.peft_kwargs,
             max_seq_len=args.max_seq_len,
```