Skip to content

Commit a352838

Browse files
committed
fix imports
Signed-off-by: Bill Nell <bnell@redhat.com>
1 parent 3a32264 commit a352838

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

vllm/model_executor/layers/fused_moe/rose_prepare_finalize.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2,8 +2,8 @@
22
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
33
from collections.abc import Callable
44

5-
import rose
65
import torch
6+
from rose.kernels.all_to_all import AllToAllKernel
77

88
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
99
from vllm.logger import init_logger
@@ -54,7 +54,7 @@ def rose_hidden_dim_scale(
5454
class RosePrepareAndFinalize(mk.FusedMoEPrepareAndFinalize):
5555
def __init__(
5656
self,
57-
a2a: rose.kernels.all_to_all.AllToAllKernel,
57+
a2a: AllToAllKernel,
5858
max_num_tokens: int,
5959
num_local_experts: int,
6060
num_dispatchers: int,

0 commit comments

Comments (0)