
Commit

cl training script
sfc-gh-hazhang committed Dec 28, 2023
1 parent e67b21d commit 7b9bfb3
Showing 2 changed files with 30 additions and 3 deletions.
7 changes: 4 additions & 3 deletions fastchat/train/train.py
@@ -142,15 +142,15 @@ def preprocess(
     # "-2" is hardcoded for the Llama tokenizer to make the offset correct.
     instruction_len = len(tokenizer(parts[0]).input_ids) - 2

-    if i != 0 and not tokenizer.legacy:
+    if i != 0:# and not tokenizer.legacy:
         # The legacy and non-legacy modes handle special tokens differently
         instruction_len -= 1

     # Ignore the user instructions
     target[cur_len : cur_len + instruction_len] = IGNORE_TOKEN_ID
     cur_len += turn_len

-    if i != 0 and not tokenizer.legacy:
+    if i != 0:# and not tokenizer.legacy:
         # The legacy and non-legacy modes handle special tokens differently
         cur_len -= 1
@@ -261,7 +261,6 @@ def train():
 )
 model_args, data_args, training_args = parser.parse_args_into_dataclasses()
 local_rank = training_args.local_rank
-
 # Set RoPE scaling factor
 config = transformers.AutoConfig.from_pretrained(
     model_args.model_name_or_path,
@@ -280,6 +279,8 @@ def train():
     config=config,
     cache_dir=training_args.cache_dir,
     trust_remote_code=model_args.trust_remote_code,
+    torch_dtype=torch.bfloat16,
+    attn_implementation="flash_attention_2",
 )
 tokenizer = transformers.AutoTokenizer.from_pretrained(
     model_args.model_name_or_path,
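
The last hunk above makes the trainer load the model in bfloat16 with FlashAttention-2. Outside of FastChat, the equivalent Hugging Face call looks roughly like the sketch below; the checkpoint name is a placeholder, and `attn_implementation="flash_attention_2"` needs a recent transformers release plus the `flash-attn` package and an Ampere-or-newer GPU.

```python
import torch
import transformers

# Minimal sketch of the new loading path; the checkpoint name is a placeholder,
# not the private path used by the training script below.
model = transformers.AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-34b-Instruct-hf",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```
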
26 changes: 26 additions & 0 deletions scripts/train_codellama_34b_sharegpt_full.sh
@@ -0,0 +1,26 @@
python -m torch.distributed.run --nnodes 2 --nproc-per-node 8 --master-addr 10.6.47.248 --master-port 5000 --node-rank 0 fastchat/train/train.py \
--model_name_or_path /home/mltraining/models/code-llama2-34B-instruct-hf-2 \
--data_path ~/data/sharegpt_20230521_4k_clean_lang_split_identity.json \
--bf16 True \
--output_dir output_codellama_34b_sharegpt_full \
--num_train_epochs 3 \
--per_device_train_batch_size 1 \
--per_device_eval_batch_size 32 \
--gradient_accumulation_steps 8 \
--evaluation_strategy "no" \
--eval_steps 1500 \
--save_strategy "epoch" \
--save_steps 1500 \
--save_total_limit 8 \
--learning_rate 2e-5 \
--weight_decay 0. \
--warmup_ratio 0.04 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--fsdp "full_shard auto_wrap" \
--fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
--tf32 True \
--model_max_length 1024 \
--gradient_checkpointing True \
--lazy_preprocess True
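
Note that `--nnodes 2` means this command only starts node rank 0 of a two-node job; the same command needs to be launched on the second machine with `--node-rank 1` (all other flags, including `--master-addr` and `--master-port`, unchanged) for the rendezvous to complete.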
