
Commit 446e514

rasmith authored and JC1DA committed
[Kernel][Triton] Add Triton implementation for scaled_mm_triton to support fp8 and int8 SmoothQuant, symmetric case (vllm-project#9857)
Signed-off-by: Randall Smith <[email protected]>
Signed-off-by: Loc Huynh <[email protected]>
1 parent 6c5f335 commit 446e514

File tree

tests/kernels/test_triton_scaled_mm.py
vllm/_custom_ops.py
vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py

3 files changed: +299 −0 lines changed
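
For context, the kernel added here implements the symmetric (zero-point-free) scaled matmul used by SmoothQuant-style int8 and fp8 GEMMs: the low-precision product is accumulated in a wide dtype, then scaled per row of the activation and per column of the weight (or by scalars), with an optional bias added in the output dtype. Below is a minimal PyTorch reference sketch of that operation, mirroring the scaled_mm_torch helper in the new test; the function name is illustrative and not part of the diff.

# Reference sketch only: C = scale_a * (A @ B) * scale_b.T + bias,
# with A/B in int8 or fp8 and the product accumulated in float32.
import torch

def scaled_mm_reference(a, b, scale_a, scale_b, out_dtype, bias=None):
    out = torch.mm(a.to(torch.float32), b.to(torch.float32))
    out = scale_a * out      # scale_a: (M, 1) per-row or (1, 1) scalar
    out = scale_b.T * out    # scale_b: (N, 1) per-column or (1, 1) scalar
    out = out.to(out_dtype)
    if bias is not None:
        out = out + bias     # bias is already in out_dtype
    return out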
tests/kernels/test_triton_scaled_mm.py

Lines changed: 106 additions & 0 deletions
@@ -0,0 +1,106 @@
"""Tests for the triton_scaled_mm kernel

Run `pytest tests/kernels/test_triton_scaled_mm.py`.
"""
import importlib
from typing import Optional, Type

import pytest
import torch

from vllm.platforms import current_platform

device = "cuda"


def scaled_mm_torch(a: torch.Tensor,
                    b: torch.Tensor,
                    scale_a: torch.Tensor,
                    scale_b: torch.Tensor,
                    out_dtype: Type[torch.dtype],
                    bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    out = torch.mm(a.to(torch.float32), b.to(torch.float32))
    out = scale_a * out
    out = scale_b.T * out
    out = out.to(out_dtype)
    if bias is not None:
        out = out + bias

    return out


def get_8bit_types():
    types = [torch.int8]
    supports_fp8 = current_platform.has_device_capability(89)
    if current_platform.is_rocm() and supports_fp8:
        types.append(torch.float8_e4m3fnuz)
    elif current_platform.is_cuda() and supports_fp8:
        types.append(torch.float8_e4m3fn)
    return types


@pytest.mark.parametrize("M", [1, 33, 64, 512])
@pytest.mark.parametrize("N", [256, 971, 20486])
@pytest.mark.parametrize("K", [128, 496, 1024])
@pytest.mark.parametrize("out_dtype", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("in_dtype", get_8bit_types())
@pytest.mark.parametrize("use_scalar_scale_a", [True, False])
@pytest.mark.parametrize("use_scalar_scale_b", [True, False])
@pytest.mark.parametrize("use_bias", [True, False])
def test_scaled_mm(M, N, K, in_dtype, out_dtype, use_scalar_scale_a,
                   use_scalar_scale_b, use_bias):
    is_floating_point_type = lambda t: torch.tensor([1, 1], dtype=t
                                                    ).is_floating_point()

    current_platform.seed_everything(0)

    # NOTE: There are cases, where if the matrix is large enough, an output
    # like 65504.4 can be produced, and can easily turn into inf when
    # multiplied when using float16/bfloat16. This means one function, e.g.,
    # testing function, and another function, e.g. golden function, can
    # produce a non-inf value while the other produces an inf value, and
    # will cause assert_close/allclose to fail, even though if overflow
    # wouldn't have occurred, the values would have been "close."
    #
    # So, the values here are kept small enough to avoid this situation.
    if is_floating_point_type(in_dtype):
        a = (0.25 * torch.rand(
            (M, K), dtype=torch.float32, device=device)).to(in_dtype)
        b = (0.25 * torch.rand(
            (K, N), dtype=torch.float32, device=device)).to(in_dtype)
    else:
        a = torch.randint(-32, 32, (M, K), dtype=in_dtype, device=device)
        b = torch.randint(-32, 32, (K, N), dtype=in_dtype, device=device)

    if use_scalar_scale_a:
        scale_a = torch.rand((1, 1), device=device)
    else:
        scale_a = 0.25 * torch.rand((M, 1), device=device)

    if use_scalar_scale_b:
        scale_b = torch.rand((1, 1), device=device)
    else:
        scale_b = 0.25 * torch.rand((N, 1), device=device)

    bias = None
    if use_bias:
        bias = torch.rand((N, ), device=device, dtype=out_dtype)

    triton_scaled_mm_module = importlib.import_module(
        "vllm.model_executor.layers.quantization.compressed_tensors."
        "triton_scaled_mm")
    triton_scaled_mm = triton_scaled_mm_module.triton_scaled_mm

    c_check = triton_scaled_mm(a, b, scale_a, scale_b, out_dtype, bias)

    a_cpu = a.cpu()
    b_cpu = b.cpu()
    scale_a_cpu = scale_a.cpu()
    scale_b_cpu = scale_b.cpu()
    bias_cpu = None if bias is None else bias.cpu()

    c_actual = scaled_mm_torch(a_cpu, b_cpu, scale_a_cpu, scale_b_cpu,
                               out_dtype, bias_cpu)

    c_check_cpu = c_check.cpu()
    torch.testing.assert_close(c_check_cpu, c_actual, rtol=1e-1, atol=1e-1)

vllm/_custom_ops.py

Lines changed: 9 additions & 0 deletions
@@ -1,5 +1,6 @@
 import contextlib
 import functools
+import importlib
 from typing import TYPE_CHECKING, List, Optional, Tuple, Union

 import torch
@@ -486,6 +487,14 @@ def cutlass_scaled_mm(a: torch.Tensor,

     m = a.shape[0]
     n = b.shape[1]
+
+    if current_platform.is_rocm():
+        triton_scaled_mm_module = importlib.import_module(
+            "vllm.model_executor.layers.quantization.compressed_tensors."
+            "triton_scaled_mm")
+        triton_scaled_mm = triton_scaled_mm_module.triton_scaled_mm
+        return triton_scaled_mm(a, b, scale_a, scale_b, out_dtype, bias)
+
     out = torch.empty((m, n), dtype=out_dtype, device=a.device)

     torch.ops._C.cutlass_scaled_mm(out, a, b, scale_a, scale_b, bias)
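
Taken together, these two hunks make ops.cutlass_scaled_mm route to the Triton kernel on ROCm, while CUDA builds continue through torch.ops._C.cutlass_scaled_mm. A caller-side sketch of the ROCm path follows; only the argument order is taken from the diff, and the shapes, layouts, and scale dtypes are illustrative (other preconditions of cutlass_scaled_mm are not shown in this commit).

# Illustrative call into vllm._custom_ops; on ROCm this now dispatches to
# the Triton kernel, on CUDA it still calls the CUTLASS custom op.
import torch
from vllm import _custom_ops as ops

M, K, N = 16, 128, 256
a = torch.randint(-32, 32, (M, K), dtype=torch.int8, device="cuda")
# Column-major weight layout; the Triton kernel's is_weak_contiguous()
# accepts either row- or column-major.
b = torch.randint(-32, 32, (N, K), dtype=torch.int8, device="cuda").t()
scale_a = torch.rand((M, 1), device="cuda")  # per-row activation scale
scale_b = torch.rand((N, 1), device="cuda")  # per-column weight scale

out = ops.cutlass_scaled_mm(a, b, scale_a, scale_b, torch.bfloat16)
print(out.shape, out.dtype)  # torch.Size([16, 256]) torch.bfloat16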
vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py

Lines changed: 184 additions & 0 deletions
@@ -0,0 +1,184 @@
from typing import Optional, Type

import torch
import triton
import triton.language as tl


def is_weak_contiguous(x: torch.Tensor):
    strides = x.stride()
    sizes = x.shape
    is_not_transpose = strides[0] == 1 and (strides[1] >= max(1, sizes[0]))
    is_transpose = strides[1] == 1 and (strides[0] >= max(1, sizes[1]))
    return is_transpose or is_not_transpose


@triton.jit
def scaled_mm_kernel(a_ptr, b_ptr, scale_a_ptr, scale_b_ptr, c_ptr, bias_ptr,
                     M, N, K, stride_am, stride_ak, stride_bk, stride_bn,
                     stride_cm, stride_cn, ACCUMULATOR_DTYPE: tl.constexpr,
                     BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                     BLOCK_SIZE_K: tl.constexpr,
                     BLOCK_SIZE_SCALE_A: tl.constexpr,
                     BLOCK_SIZE_SCALE_B: tl.constexpr):
    pid = tl.program_id(axis=0)

    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)

    pid_m = pid // num_pid_n
    pid_n = pid % num_pid_n

    accumulator_dtype = ACCUMULATOR_DTYPE
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N),
                           dtype=accumulator_dtype)

    # NOTE: Some tensor inputs are so large, they will cause int32 overflow
    # so it is necessary to use tl.int64 for all the offsets, else SEGV will
    # eventually occur.

    # Offsets and masks.
    offsets_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M).to(tl.int64)
    masks_am = offsets_am < M

    offsets_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N).to(tl.int64)
    masks_bn = offsets_bn < N

    offsets_k = tl.arange(0, BLOCK_SIZE_K).to(tl.int64)
    offsets_a = (stride_am * offsets_am[:, None] +
                 stride_ak * offsets_k[None, :])
    offsets_b = (stride_bk * offsets_k[:, None] +
                 stride_bn * offsets_bn[None, :])

    # NOTE: BLOCK_SIZE_SCALE_A could be 1 or BLOCK_SIZE_M, so need to create
    # appropriate offsets and masks for each case. Same goes for
    # BLOCK_SIZE_SCALE_B.
    offsets_scale_am = (tl.arange(0, BLOCK_SIZE_SCALE_A) +
                        (BLOCK_SIZE_SCALE_A > 1) * pid_m * BLOCK_SIZE_M)
    masks_scale_am = offsets_scale_am < M

    offsets_scale_bn = (tl.arange(0, BLOCK_SIZE_SCALE_B) +
                        (BLOCK_SIZE_SCALE_B > 1) * pid_n * BLOCK_SIZE_N)
    masks_scale_bn = offsets_scale_bn < N

    a_ptrs = a_ptr + offsets_a
    b_ptrs = b_ptr + offsets_b

    scale_a_ptrs = scale_a_ptr + offsets_scale_am
    scale_b_ptrs = scale_b_ptr + offsets_scale_bn

    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        masks_k = offsets_k < K
        masks_a = masks_am[:, None] & masks_k[None, :]
        a = tl.load(a_ptrs, mask=masks_a)

        masks_b = masks_k[:, None] & masks_bn[None, :]
        b = tl.load(b_ptrs, mask=masks_b)

        # Accumulate results.
        accumulator = tl.dot(a, b, accumulator, out_dtype=accumulator_dtype)

        offsets_k += BLOCK_SIZE_K
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk

    # Apply scale at end.
    masks_scale_a = masks_scale_am[:, None] & (tl.arange(0, 1) < 1)[:, None]
    scale_a = tl.load(scale_a_ptrs[:, None], masks_scale_a)
    # Need to broadcast to the appropriate size, if scale_a is already
    # (BLOCK_SIZE_M, 1) then it will broadcast to its own shape. Same goes
    # for scale_b below.
    scale_a = scale_a.broadcast_to((BLOCK_SIZE_M, 1))
    accumulator = scale_a * accumulator.to(tl.float32)

    masks_scale_b = masks_scale_bn[:, None] & (tl.arange(0, 1) < 1)[None, :]
    scale_b = tl.load(scale_b_ptrs[:, None], masks_scale_b)
    scale_b = scale_b.broadcast_to((BLOCK_SIZE_N, 1))
    accumulator = scale_b.T * accumulator.to(tl.float32)

    # Convert to output format.
    c = accumulator.to(c_ptr.type.element_ty)

    # Add bias, it's already in output format, so add it after conversion.
    if bias_ptr:
        offsets_bias = offsets_bn
        bias_ptrs = bias_ptr + offsets_bias
        bias_mask = offsets_bias < N
        bias = tl.load(bias_ptrs, bias_mask)
        c += bias

    # Save output
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M).to(tl.int64)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N).to(tl.int64)
    offs_cm = offs_cm.to(tl.int64)
    offs_cn = offs_cn.to(tl.int64)
    c_ptrs = (c_ptr + stride_cm * offs_cm[:, None] +
              stride_cn * offs_cn[None, :])
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)

    tl.store(c_ptrs, c, mask=c_mask)


# input - [M, K]
# weight - [K, N]
def triton_scaled_mm(input: torch.Tensor,
                     weight: torch.Tensor,
                     scale_a: torch.Tensor,
                     scale_b: torch.Tensor,
                     out_dtype: Type[torch.dtype],
                     bias: Optional[torch.Tensor] = None,
                     block_size_m: int = 32,
                     block_size_n: int = 32,
                     block_size_k: int = 32) -> torch.Tensor:
    M, K = input.shape
    N = weight.shape[1]

    assert N > 0 and K > 0 and M > 0
    assert weight.shape[0] == K
    assert input.dtype == weight.dtype
    assert scale_a.dtype == scale_b.dtype and scale_a.is_floating_point()
    assert scale_a.shape == torch.Size([1, 1]) or scale_a.shape == torch.Size(
        [M, 1])
    assert scale_b.shape == torch.Size([1, 1]) or scale_b.shape == torch.Size(
        [N, 1])
    assert out_dtype.is_floating_point
    assert bias is None or bias.is_floating_point()
    assert is_weak_contiguous(input)
    assert is_weak_contiguous(weight)

    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(
        N, META['BLOCK_SIZE_N']), )

    result = torch.empty((M, N), dtype=out_dtype, device=input.device)

    has_scalar = lambda x: x.shape[0] == 1 and x.shape[1] == 1

    block_size_sa = 1 if has_scalar(scale_a) else block_size_m
    block_size_sb = 1 if has_scalar(scale_b) else block_size_n

    accumulator_dtype = tl.float32 if input.is_floating_point() else tl.int32

    # A = input, B = weight, C = result
    # A = M x K, B = K x N, C = M x N
    scaled_mm_kernel[grid](input,
                           weight,
                           scale_a,
                           scale_b,
                           result,
                           bias,
                           M,
                           N,
                           K,
                           input.stride(0),
                           input.stride(1),
                           weight.stride(0),
                           weight.stride(1),
                           result.stride(0),
                           result.stride(1),
                           accumulator_dtype,
                           BLOCK_SIZE_M=block_size_m,
                           BLOCK_SIZE_N=block_size_n,
                           BLOCK_SIZE_K=block_size_k,
                           BLOCK_SIZE_SCALE_A=block_size_sa,
                           BLOCK_SIZE_SCALE_B=block_size_sb)

    return result.to(out_dtype)
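
For reference, a direct invocation of the new wrapper, mirroring what the test above exercises; the concrete shapes, scales, and bias are illustrative.

# Illustrative direct use of triton_scaled_mm with int8 inputs and
# per-row / per-column float32 scales.
import torch
from vllm.model_executor.layers.quantization.compressed_tensors.triton_scaled_mm import (
    triton_scaled_mm)

M, K, N = 64, 128, 256
a = torch.randint(-32, 32, (M, K), dtype=torch.int8, device="cuda")
b = torch.randint(-32, 32, (K, N), dtype=torch.int8, device="cuda")
scale_a = torch.rand((M, 1), device="cuda")  # or (1, 1) for a scalar scale
scale_b = torch.rand((N, 1), device="cuda")  # or (1, 1) for a scalar scale
bias = torch.rand((N, ), device="cuda", dtype=torch.float16)

c = triton_scaled_mm(a, b, scale_a, scale_b, torch.float16, bias)
print(c.shape, c.dtype)  # torch.Size([64, 256]) torch.float16

Note the design choice visible in the kernel: the scales are applied once after the K loop, on the int32/float32 accumulator, and the bias is added only after conversion to the output dtype, matching the scaled_mm_torch reference used by the tests.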

0 commit comments
