Add llama 7B config #100

Merged
merged 1 commit on Feb 29, 2024
40 changes: 40 additions & 0 deletions train_configs/llama_7b.toml
@@ -0,0 +1,40 @@
# TorchTrain Config.toml
[job]
dump_folder = "./outputs"

[profiling]
run_profiler = true
save_traces_folder = "profiling/traces"
# profiling frequency - example: 10 means every 10th iter will be profiled
profile_every_x_iter = 100

[metrics]
enable_tensorboard = true
save_tb_folder = "tb"
log_freq = 10

[model]
name = "llama"
flavor = "7B"
tokenizer_path = "./torchtrain/datasets/tokenizer/tokenizer.model"

[optimizer]
name = "AdamW"
lr = 3e-4


[training]
batch_size = 8
seq_len = 2048
warmup_steps = 200 # lr scheduler warm up
max_norm = 1.0 # grad norm clipping
steps = 1000
# only dp would be sufficient for 7B
data_parallel_degree = -1
sequence_parallel_degree = 1
pipeline_parallel_degree = 1
compile = false
checkpoint_interval = 3600
checkpoint_interval_type = "steps"
checkpoint_folder = ""
dataset = "alpaca"
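The file is plain TOML, so any TOML parser can read it. Below is a minimal sketch, not TorchTrain's actual config loader, that uses Python's standard-library `tomllib` to load the file above and pull out a few of the values the trainer would consume; the field names match the config, but the loading code itself is illustrative only.

```python
# Minimal sketch: read train_configs/llama_7b.toml with the stdlib TOML parser.
# This is NOT TorchTrain's own loader; it only illustrates the config's structure.
import tomllib  # Python 3.11+

with open("train_configs/llama_7b.toml", "rb") as f:
    cfg = tomllib.load(f)

model = cfg["model"]          # {"name": "llama", "flavor": "7B", "tokenizer_path": ...}
training = cfg["training"]    # batch size, sequence length, parallelism degrees, ...

print(
    f'{model["name"]}-{model["flavor"]}: '
    f'batch_size={training["batch_size"]}, seq_len={training["seq_len"]}, '
    f'lr={cfg["optimizer"]["lr"]}, dp_degree={training["data_parallel_degree"]}'
)
```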