add use case for custom kernel for matvec operation
charlifu committed Mar 21, 2024
1 parent 9cb2bbd commit 51ce9f5
Showing 1 changed file with 15 additions and 0 deletions.
15 changes: 15 additions & 0 deletions vllm/model_executor/layers/linear.py
@@ -13,6 +13,7 @@
     divide, split_tensor_along_last_dim)
 from vllm.model_executor.utils import set_weight_attrs
 from vllm.logger import init_logger
+from vllm import custom_ops

 logger = init_logger(__name__)

@@ -72,6 +73,20 @@ def apply_weights(self,
                       x: torch.Tensor,
                       bias: Optional[torch.Tensor] = None) -> torch.Tensor:
         weight = weights["weight"]
+        if x.shape[0] == 1:
+            m, n, k = weight.shape[0], x.shape[0], x.shape[1]
+            out = torch.empty(x.shape[0], weight.shape[0], dtype=x.dtype)
+            if k == 8192 and (m == 1280 or m == 7168):
+                custom_ops.LLMM1(weight, x, out, 8)
+            elif k == 3584 and m == 8192:
+                custom_ops.LLMM1(weight, x, out, 8)
+            elif k <= 8192 and k % 8 == 0 and m % 4 == 0:
+                custom_ops.LLMM1(weight, x, out, 4)
+            else:
+                out = F.linear(x, weight)
+            if bias != None:
+                out = out + bias
+            return out
         if self.separate_bias_add:
             if bias is not None:
                 return F.linear(x, weight) + bias
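For readers skimming the diff, here is a minimal sketch (not part of the commit) that restates the added dispatch as a standalone helper: custom_ops.LLMM1 is called only when the input has a single row, so the GEMM degenerates to a matrix-vector product, and only for weight shapes matching the hard-coded conditions; everything else falls back to F.linear. The names select_llmm1_arg and matvec are hypothetical, the meaning of LLMM1's fourth argument (8 or 4) is not documented in the diff, and allocating out on x.device and checking `bias is not None` are assumptions layered on top of the committed code.

# Illustrative sketch, not part of the commit: the committed change lives inside
# apply_weights; this standalone version only mirrors its shape gating so it
# can be read and tested in isolation.
from typing import Optional

import torch
import torch.nn.functional as F


def select_llmm1_arg(m: int, k: int) -> Optional[int]:
    # Mirrors the diff's conditions; returns the integer passed as LLMM1's
    # fourth argument, or None to fall back to F.linear.
    if k == 8192 and m in (1280, 7168):
        return 8
    if k == 3584 and m == 8192:
        return 8
    if k <= 8192 and k % 8 == 0 and m % 4 == 0:
        return 4
    return None


def matvec(weight: torch.Tensor,
           x: torch.Tensor,
           bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert x.shape[0] == 1, "custom path only covers single-row (matvec) inputs"
    m, k = weight.shape[0], x.shape[1]
    arg = select_llmm1_arg(m, k)
    if arg is None:
        out = F.linear(x, weight)
    else:
        # The diff allocates `out` with torch.empty(...) on the default device;
        # pinning it to x.device here is an added assumption.
        out = torch.empty(x.shape[0], m, dtype=x.dtype, device=x.device)
        from vllm import custom_ops  # the module imported by the first hunk
        custom_ops.LLMM1(weight, x, out, arg)
    return out if bias is None else out + bias

A quick sanity check under these assumptions is to compare matvec(weight, x, bias) against F.linear(x, weight, bias) for a weight shape that takes the custom path.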
