# sgl-kernel/python/sgl_kernel/moe.py (sglang 0.4.5.post1)


import torch


def moe_align_block_size(
    topk_ids: torch.Tensor,
    num_experts: int,
    block_size: int,
    sorted_token_ids: torch.Tensor,
    experts_ids: torch.Tensor,
    num_tokens_post_pad: torch.Tensor,
    token_cnts_buffer: torch.Tensor,
    cumsum_buffer: torch.Tensor,
) -> None:
    """Group the tokens routed to each expert and pad every expert's segment
    to a multiple of block_size, writing into the preallocated buffers."""
    torch.ops.sgl_kernel.moe_align_block_size.default(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
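

# Illustrative sketch (not part of this module's API): one way to allocate the
# output and scratch buffers that moe_align_block_size fills in. The padding
# formula and buffer shapes follow common fused-MoE launcher conventions and
# should be treated as assumptions, not a documented contract of the kernel.
def _example_moe_align(topk_ids: torch.Tensor, num_experts: int, block_size: int):
    device = topk_ids.device
    # Worst case: every expert's token count is rounded up to a block boundary.
    max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
    sorted_token_ids = torch.empty(
        (max_num_tokens_padded,), dtype=torch.int32, device=device
    )
    # Fill with numel() so padding slots index one past the last real token.
    sorted_token_ids.fill_(topk_ids.numel())
    max_num_blocks = (max_num_tokens_padded + block_size - 1) // block_size
    experts_ids = torch.empty((max_num_blocks,), dtype=torch.int32, device=device)
    num_tokens_post_pad = torch.empty((1,), dtype=torch.int32, device=device)
    # Scratch space for per-expert token counts and their prefix sum.
    token_cnts_buffer = torch.zeros(
        ((num_experts + 1) * num_experts,), dtype=torch.int32, device=device
    )
    cumsum_buffer = torch.zeros((num_experts + 1,), dtype=torch.int32, device=device)
    moe_align_block_size(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
    return sorted_token_ids, experts_ids, num_tokens_post_pad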


def topk_softmax(
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
) -> None:
    """Softmax the router logits and select the top-k experts per token,
    writing the weights and expert ids into the preallocated buffers."""
    torch.ops.sgl_kernel.topk_softmax.default(
        topk_weights, topk_ids, token_expert_indices, gating_output
    )
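

# Illustrative sketch (not part of this module's API): calling topk_softmax on
# router logits of shape [num_tokens, num_experts]. The float32/int32 output
# dtypes follow common top-k routing conventions and are assumptions here,
# not a documented contract of the kernel.
def _example_topk_softmax(gating_output: torch.Tensor, topk: int = 2):
    num_tokens = gating_output.shape[0]
    device = gating_output.device
    topk_weights = torch.empty(
        (num_tokens, topk), dtype=torch.float32, device=device
    )
    topk_ids = torch.empty((num_tokens, topk), dtype=torch.int32, device=device)
    token_expert_indices = torch.empty(
        (num_tokens, topk), dtype=torch.int32, device=device
    )
    # The kernel writes softmax-normalized top-k weights and the selected
    # expert ids for each token into the preallocated buffers; float32
    # gating logits are assumed.
    topk_softmax(topk_weights, topk_ids, token_expert_indices, gating_output)
    return topk_weights, topk_ids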