FORCE_COLOR=1 deepspeed --hostfile hostfile \
    --num_nodes 6 --num_gpus 4 \
    train_sft_lora.py \
    --model_name_or_path /home/test/Qwen3-32B \
    --data_glob "/home/test/datasets/my_corpus/train*.jsonl" \
    --eval_data_glob "/home/test/jd_train/datasets/test/*.jsonl" \
    --output_dir /home/test/checkpoints/q3-32b-lora \
    --seq_len 512 \
    --bf16 \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 1 \
    --learning_rate 2e-4 \
    --warmup_ratio 0.03 \
    --lora_r 32 --lora_alpha 64 --lora_dropout 0.05 \
    --lora_exclude lm_head \
    --max_steps 300 \
    --log_interval 10 \
    --eval_steps 50 \
    --save_steps 50 \
    --save_total_limit 4 \
    --gradient_checkpointing \
    --deepspeed /home/test/jd_train/ds_config_zero3_lora.json \
    --report_to wandb --wandb_project ds-qwen3-lora
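The launcher reads two files that are not shown above. The first is the hostfile, which lists one worker node per line in DeepSpeed's standard "hostname slots=N" format. The hostnames below are placeholders; a real file would name the six nodes (4 GPUs each) implied by --num_nodes 6 --num_gpus 4:

worker-1 slots=4
worker-2 slots=4
worker-3 slots=4
worker-4 slots=4
worker-5 slots=4
worker-6 slots=4

The second is ds_config_zero3_lora.json. Its exact contents depend on how train_sft_lora.py consumes it; the sketch below is a minimal ZeRO-3 + bf16 config, and the "auto" placeholders assume the script forwards the file through the Hugging Face Trainer's DeepSpeed integration, which fills batch sizes and gradient clipping in from the command-line arguments. If the script configures DeepSpeed directly, replace the "auto" values with explicit numbers:

{
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "gradient_clipping": "auto",
  "bf16": { "enabled": true },
  "zero_optimization": {
    "stage": 3,
    "overlap_comm": true,
    "contiguous_gradients": true,
    "stage3_gather_16bit_weights_on_model_save": true
  },
  "steps_per_print": 10
}

With 6 nodes of 4 GPUs, a per-device batch of 1, and no gradient accumulation, each optimizer step sees a global batch of 24 sequences of up to 512 tokens.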