#!/usr/bin/env bash
# Launch DeepSpeed ZeRO-3 supervised fine-tuning of Qwen3-1.7B on 4 local GPUs.
#
# As originally written, the exports and the torchrun invocation were fused onto
# one line, turning `torchrun` and its flags into arguments of `export` (which
# aborts with "not a valid identifier"). Restored as separate statements.
set -euo pipefail

# Avoid HF tokenizers thread-pool deadlock warnings after torchrun forks workers.
export TOKENIZERS_PARALLELISM=false
# Cap CUDA allocator split size to reduce memory fragmentation during training.
export PYTORCH_CUDA_ALLOC_CONF="max_split_size_mb:128"
# Verbose NCCL logging to diagnose multi-GPU communication issues.
export NCCL_DEBUG=INFO

torchrun --nproc_per_node 4 /home/test/jd_train/train_sft_ds.py \
  --model_name_or_path /home/test/Qwen3-1.7B \
  --data_glob "/home/test/datasets/my_corpus/train.jsonl" \
  --output_dir /home/test/checkpoints/q3-1_7b-ds4 \
  --seq_len 512 \
  --per_device_train_batch_size 1 \
  --gradient_accumulation_steps 1 \
  --learning_rate 2e-5 --weight_decay 0.1 --warmup_ratio 0.02 \
  --max_steps 375 --log_interval 1 \
  --bf16 \
  --deepspeed /home/test/jd_train/ds_config_zero3.json \
  --report_to none \
  --eval_data_glob "/home/test/datasets/my_corpus/test.jsonl"