60 lines
1.6 KiB
Python
60 lines
1.6 KiB
Python
"""
|
|
Usage:
|
|
python3 -m unittest test_torch_native_attention_backend.TestTorchNativeAttnBackend.test_mmlu
|
|
"""
|
|
|
|
import unittest
|
|
from types import SimpleNamespace
|
|
|
|
from sglang.srt.utils import kill_process_tree
|
|
from sglang.test.run_eval import run_eval
|
|
from sglang.test.test_utils import (
|
|
DEFAULT_MODEL_NAME_FOR_TEST,
|
|
DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
|
|
DEFAULT_URL_FOR_TEST,
|
|
CustomTestCase,
|
|
is_in_ci,
|
|
popen_launch_server,
|
|
run_bench_one_batch,
|
|
)
|
|
|
|
|
|
class TestTorchNativeAttnBackend(CustomTestCase):
    """Integration tests for the ``torch_native`` attention backend.

    Covers two paths: a single-batch latency benchmark with a minimum
    throughput bar, and an end-to-end MMLU accuracy evaluation against a
    locally launched server.
    """

    def test_latency(self):
        """Benchmark one batch and enforce a throughput floor in CI."""
        output_throughput = run_bench_one_batch(
            DEFAULT_MODEL_NAME_FOR_TEST,
            ["--attention-backend", "torch_native"],
        )

        if is_in_ci():
            # Torch native backend is expected to be slower, hence the low bar.
            # Use unittest's assertion rather than a bare `assert`: a bare
            # assert is stripped under `python -O` and this also matches the
            # assertion style used by test_mmlu below.
            self.assertGreater(output_throughput, 50, f"{output_throughput=}")

    def test_mmlu(self):
        """Launch a server with the torch_native backend and check MMLU score."""
        model = DEFAULT_MODEL_NAME_FOR_TEST
        base_url = DEFAULT_URL_FOR_TEST
        process = popen_launch_server(
            model,
            base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            other_args=["--attention-backend", "torch_native"],
        )

        try:
            args = SimpleNamespace(
                base_url=base_url,
                model=model,
                eval_name="mmlu",
                num_examples=64,
                num_threads=32,
            )

            metrics = run_eval(args)
            self.assertGreaterEqual(metrics["score"], 0.65)
        finally:
            # Always tear down the launched server process tree, even when
            # the eval raises, so CI workers are not left with orphans.
            kill_process_tree(process.pid)
|
|
|
|
|
|
# Allow running this test module directly (e.g. `python3 <this file>`)
# in addition to `python3 -m unittest ...`.
if __name__ == "__main__":
    unittest.main()
|