jd_train/train_mm_zero3_lora.sh

17 lines
619 B
Bash
Executable File

# Multi-node LoRA SFT of Qwen3-32B via DeepSpeed ZeRO-3.
# Launches train_sft_lora.py across 6 nodes x 4 GPUs (24 ranks total).
# All paths below can be overridden from the environment; defaults match
# the original hard-coded run.
set -euo pipefail

# NOTE(review): relative hostfile path — resolved against the CWD at launch
# time, so this script must be run from the directory containing `hostfile`.
HOSTFILE="${HOSTFILE:-hostfile}"
MODEL_PATH="${MODEL_PATH:-/home/test/Qwen3-32B}"
DATA_GLOB="${DATA_GLOB:-/home/test/datasets/my_corpus/train*.jsonl}"
OUTPUT_DIR="${OUTPUT_DIR:-/home/test/checkpoints/q3-32b-lora}"
DS_CONFIG="${DS_CONFIG:-/home/test/jd_train/ds_config_zero3_lora.json}"

# FORCE_COLOR=1 keeps colored log output when stdout is not a TTY
# (e.g. when piped to a log file by the scheduler).
FORCE_COLOR=1 deepspeed --hostfile "$HOSTFILE" \
  --num_nodes 6 --num_gpus 4 \
  train_sft_lora.py \
  --model_name_or_path "$MODEL_PATH" \
  --data_glob "$DATA_GLOB" \
  --output_dir "$OUTPUT_DIR" \
  --seq_len 1024 \
  --bf16 \
  --gradient_accumulation_steps 1 \
  --per_device_train_batch_size 1 \
  --learning_rate 1e-4 \
  --warmup_ratio 0.03 \
  --lora_r 16 --lora_alpha 32 --lora_dropout 0.05 \
  --max_steps 62 \
  --deepspeed "$DS_CONFIG" \
  --report_to wandb --wandb_project ds-qwen3-lora
# NOTE(review): --max_steps 62 looks like a smoke-test length rather than a
# full training run — confirm before using for a production checkpoint.