#!/usr/bin/env bash
# Launch multi-node LoRA SFT of Qwen3-32B via the DeepSpeed launcher.
#
# NOTE(fix): the original was collapsed to one line with literal `\ `
# (backslash-space) sequences left inside it; backslash-space escapes the
# space and fuses neighboring tokens into single words (e.g. the argument
# `hostfile --num_nodes` as ONE string), so the command failed as written.
# The continuations below put the backslash immediately before the newline,
# which is the only position where `\` continues a line.
set -euo pipefail

# FORCE_COLOR=1: keep colored log output even though stdout is not a TTY
# (launcher pipes per-rank output).
FORCE_COLOR=1 deepspeed --hostfile hostfile \
  --num_nodes 6 --num_gpus 4 \
  train_sft_lora.py \
  --model_name_or_path /home/test/Qwen3-32B \
  --data_glob "/home/test/datasets/my_corpus/train*.jsonl" \
  --output_dir /home/test/checkpoints/q3-32b-lora \
  --seq_len 1024 \
  --bf16 \
  --per_device_train_batch_size 1 \
  --gradient_accumulation_steps 4 \
  --learning_rate 2e-4 \
  --warmup_ratio 0.03 \
  --lora_r 16 --lora_alpha 64 --lora_dropout 0.05 \
  --lora_exclude lm_head \
  --max_steps 3000 \
  --log_interval 10 \
  --eval_steps 50 \
  --gradient_checkpointing \
  --deepspeed /home/test/jd_train/ds_config_zero3_lora.json \
  --report_to wandb --wandb_project ds-qwen3-lora