vllm/vllm_v0.10.0/tests/config/test_config_with_model.yaml

8 lines
192 B
YAML

---
# Same as test_config.yaml but with model specified
# (exercises loading the model name from the config file itself
# rather than from the command line).
model: config-model  # model identifier consumed by the test
port: 12312  # NOTE(review): presumably the server listen port — confirm against test
served_model_name: mymodel  # name the model is served under
tensor_parallel_size: 2
trust_remote_code: true
multi_step_stream_outputs: false