[LLVMGPU] Add 32x32x16 F8 MFMA intrinsic #19106

Merged (2 commits) on Nov 13, 2024
@@ -15,7 +15,7 @@
// GFX942: target = #iree_gpu.target<arch = "gfx942",
// GFX942-SAME: wgp = <compute = fp64|fp32|fp16|int64|int32|int16|int8, storage = b64|b32|b16|b8,
// GFX942-SAME: subgroup = shuffle|arithmetic, dot = dp4xi8toi32,
// GFX942-SAME: mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>],
// GFX942-SAME: mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>],
// GFX942-SAME: subgroup_size_choices = [64], max_workgroup_sizes = [1024, 1024, 1024],
// GFX942-SAME: max_thread_count_per_workgroup = 1024, max_workgroup_memory_bytes = 65536,
// GFX942-SAME: max_workgroup_counts = [2147483647, 2147483647, 2147483647],
@@ -26,7 +26,7 @@
// GFX941-SAME: features = "+sramecc,-xnack"

// GFX940: target = #iree_gpu.target<arch = "gfx940",
// GFX940-SAME: mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>],
// GFX940-SAME: mma = [<MFMA_F32_16x16x4_F32>, <MFMA_F32_16x16x16_F16>, <MFMA_F32_32x32x8_F16>, <MFMA_F64_16x16x4_F64>, <MFMA_F32_16x16x16_BF16>, <MFMA_F32_32x32x8_BF16>, <MFMA_F32_16x16x32_F8E5M2FNUZ>, <MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ>, <MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ>, <MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ>, <MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ>, <MFMA_I32_16x16x32_I8>, <MFMA_I32_32x32x16_I8>],

// GFX1100: target = #iree_gpu.target<arch = "gfx1100",
// GFX1100-SAME: mma = [<WMMA_F32_16x16x16_F16>, <WMMA_F16_16x16x16_F16>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>, <WMMA_I32_16x16x16_I8>]
@@ -256,6 +256,18 @@ static std::tuple<Type, Type, Type> getABCElementTypes(MLIRContext *context,
case MMAIntrinsic::MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ: {
return {f8E5M2FNUZ, f8E4M3FNUZ, f32};
}
case MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ: {
return {f8E4M3FNUZ, f8E4M3FNUZ, f32};
}
case MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ: {
return {f8E5M2FNUZ, f8E5M2FNUZ, f32};
}
case MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ: {
return {f8E4M3FNUZ, f8E5M2FNUZ, f32};
}
case MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ: {
return {f8E5M2FNUZ, f8E4M3FNUZ, f32};
}
case MMAIntrinsic::MFMA_I32_16x16x32_I8: {
return {i8, i8, i32};
}
@@ -608,6 +620,10 @@ MMASingleSubgroupLayout getSingleSubgroupLayout(MMAIntrinsic intrinsic,
return {/*outer=*/{1, 1}, /*thread=*/{4, 16}, /*tstrides=*/{16, 1},
/*element=*/{4, 1}};
}
case MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ:
case MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ:
case MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ:
case MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ:
case MMAIntrinsic::MFMA_I32_32x32x16_I8:
switch (fragment) {
case MMAFragment::Lhs:
@@ -675,6 +691,8 @@ SmallVector<VirtualMMAIntrinsic> MMAAttr::getVirtualIntrinsics() const {
return {VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16};
case MMAIntrinsic::MFMA_F32_16x16x32_F8E4M3FNUZ:
return {VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ};
case MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ:
return {VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ};
default:
return {};
}
@@ -1218,6 +1236,9 @@ static OpaqueMmaLayout getOpaqueVMMALayout(MLIRContext *context,
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ: {
return OpaqueMmaLayout{16, 16, 32, f8E4M3FNUZ, f8E4M3FNUZ, f32};
}
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ: {
return OpaqueMmaLayout{32, 32, 16, f8E4M3FNUZ, f8E4M3FNUZ, f32};
}
// V(Virtual)MFMA instructions which have 2 mfma instructions interleaved
// along the k dimension.
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F16: {
@@ -1252,6 +1273,7 @@ VirtualMMAAttr::getABCVectorTypes() const {
auto cType = VectorType::get({4}, C);
return {aType, bType, cType};
}
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16: {
auto aType = VectorType::get({8}, A);
auto bType = VectorType::get({8}, B);
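
As a cross-check on the {8}-element A and B vectors above (a standalone sketch, not part of this diff; the helper names are hypothetical): with the 32x32x16 opaque layout and the 64-lane subgroup size, evenly distributing each fragment over the subgroup gives 32*16/64 = 8 A elements, 16*32/64 = 8 B elements, and 32*32/64 = 16 f32 accumulator elements per lane.

#include <cassert>
#include <cstdint>

// Per-lane element counts when the M x K, K x N, and M x N fragments are
// spread evenly across a subgroup.
struct PerLaneCounts {
  int64_t a, b, c;
};

static PerLaneCounts perLaneCounts(int64_t m, int64_t n, int64_t k,
                                   int64_t subgroupSize) {
  return {m * k / subgroupSize, k * n / subgroupSize, m * n / subgroupSize};
}

int main() {
  // VMFMA_F32_32x32x16_F8E4M3FNUZ: OpaqueMmaLayout{32, 32, 16, ...}, 64 lanes.
  PerLaneCounts counts = perLaneCounts(32, 32, 16, 64);
  assert(counts.a == 8);   // matches VectorType::get({8}, A)
  assert(counts.b == 8);   // matches VectorType::get({8}, B)
  assert(counts.c == 16);  // a 32x32 f32 accumulator tile leaves 16 floats per lane
  return 0;
}
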
@@ -1274,6 +1296,7 @@ int64_t VirtualMMAAttr::getSubgroupSize() const {
switch (getIntrinsic().getValue()) {
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F16:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16: {
return 64;
}
@@ -1328,7 +1351,8 @@ int64_t VirtualMMAAttr::getUnrollK() const {
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16: {
return 2;
}
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ: {
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ: {
return 1;
}
}
@@ -1356,6 +1380,7 @@ FailureOr<Value> VirtualMMAAttr::buildMmaOperation(OpBuilder &builder,
switch (getIntrinsic().getValue()) {
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F16:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16: {
// Generate mfma's for K with unrolled kernels.
const int64_t unrollKFactor = getUnrollK();
@@ -1394,6 +1419,7 @@ int64_t VirtualMMAAttr::getBlockSize() const {
switch (getIntrinsic().getValue()) {
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_16x16x32_F16:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ:
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F16: {
return 1;
}
@@ -1442,6 +1468,18 @@ MMASingleSubgroupLayout getSingleSubgroupLayout(VirtualMMAIntrinsic intrinsic,
return {/*outer=*/{4, 1}, /*thread=*/{2, 32}, /*tstrides=*/{32, 1},
/*element=*/{4, 1}};
}
case VirtualMMAIntrinsic::VMFMA_F32_32x32x16_F8E4M3FNUZ:
switch (fragment) {
case MMAFragment::Lhs:
return {/*outer=*/{1, 2}, /*thread=*/{32, 2}, /*tstrides=*/{1, 32},
/*element=*/{1, 4}};
case MMAFragment::Rhs:
return {/*outer=*/{2, 1}, /*thread=*/{2, 32}, /*tstrides=*/{32, 1},
/*element=*/{4, 1}};
case MMAFragment::Acc:
return {/*outer=*/{4, 1}, /*thread=*/{2, 32}, /*tstrides=*/{32, 1},
/*element=*/{4, 1}};
}
}
assert(false && "unhandled virtual mma layout type.");
return {};
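
To make the new 32x32x16 single-subgroup layout concrete (a standalone sketch, not part of this diff; the struct and field names are hypothetical): for each fragment, the elementwise product outer * thread * element must recover that fragment's tile shape, i.e. 32x16 for Lhs, 16x32 for Rhs, and 32x32 for Acc.

#include <array>
#include <cassert>

// Mirrors the {outer, thread, element} triples above; tstrides only describe
// how lanes are ordered within the tile and do not change its shape.
struct SubgroupLayoutSketch {
  std::array<int, 2> outer, thread, element;
  std::array<int, 2> tileShape() const {
    return {outer[0] * thread[0] * element[0],
            outer[1] * thread[1] * element[1]};
  }
};

int main() {
  // VMFMA_F32_32x32x16_F8E4M3FNUZ layouts from the switch above.
  SubgroupLayoutSketch lhs{{1, 2}, {32, 2}, {1, 4}};
  SubgroupLayoutSketch rhs{{2, 1}, {2, 32}, {4, 1}};
  SubgroupLayoutSketch acc{{4, 1}, {2, 32}, {4, 1}};
  assert((lhs.tileShape() == std::array<int, 2>{32, 16}));  // M x K
  assert((rhs.tileShape() == std::array<int, 2>{16, 32}));  // K x N
  assert((acc.tileShape() == std::array<int, 2>{32, 32}));  // M x N
  return 0;
}
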
@@ -158,6 +158,10 @@ def MFMA_F32_16x16x32_F8E5M2FNUZ : I32EnumAttrCase<"MFMA_F32_16x16x32_F8E5M2FNUZ
def MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ : I32EnumAttrCase<"MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ", 0x1231>;
def MFMA_F32_16x16x32_F8E4M3FNUZ : I32EnumAttrCase<"MFMA_F32_16x16x32_F8E4M3FNUZ", 0x1232>;
def MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ : I32EnumAttrCase<"MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ", 0x1233>;
def MFMA_F32_32x32x16_F8E5M2FNUZ : I32EnumAttrCase<"MFMA_F32_32x32x16_F8E5M2FNUZ", 0x1234>;
def MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ : I32EnumAttrCase<"MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ", 0x1235>;
def MFMA_F32_32x32x16_F8E4M3FNUZ : I32EnumAttrCase<"MFMA_F32_32x32x16_F8E4M3FNUZ", 0x1236>;
def MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ : I32EnumAttrCase<"MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ", 0x1237>;
def MFMA_I32_16x16x32_I8 : I32EnumAttrCase<"MFMA_I32_16x16x32_I8", 0x12C0>;
def MFMA_I32_32x32x16_I8 : I32EnumAttrCase<"MFMA_I32_32x32x16_I8", 0x12C1>;
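
A side note on the values chosen above (a standalone sketch, not part of this diff): the four new cases take 0x1234 through 0x1237, continuing the 16x16x32 F8 run that ends at 0x1233, and stay well clear of the integer MFMA cases at 0x12C0 and 0x12C1. The enum below simply restates those constants.

#include <cstdint>

// Discriminant values copied from the I32EnumAttrCase definitions above.
enum class MMAIntrinsicValueSketch : uint32_t {
  MFMA_F32_32x32x16_F8E5M2FNUZ = 0x1234,
  MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ = 0x1235,
  MFMA_F32_32x32x16_F8E4M3FNUZ = 0x1236,
  MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ = 0x1237,
  MFMA_I32_16x16x32_I8 = 0x12C0,
};

// The new F8 block stays below the existing integer MFMA sub-range.
static_assert(MMAIntrinsicValueSketch::MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ <
                  MMAIntrinsicValueSketch::MFMA_I32_16x16x32_I8,
              "the added F8 values must not collide with the I8 cases");

int main() { return 0; }
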

@@ -193,6 +197,10 @@ def IREEGPU_MMAIntrinsic : IREEGPU_I32MmaEnumAttr<"MMAIntrinsic",
MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ,
MFMA_F32_16x16x32_F8E4M3FNUZ,
MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ,
MFMA_F32_32x32x16_F8E5M2FNUZ,
MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ,
MFMA_F32_32x32x16_F8E4M3FNUZ,
MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ,
MFMA_I32_16x16x32_I8,
MFMA_I32_32x32x16_I8,

@@ -211,12 +219,14 @@ def IREEGPU_MMAIntrinsic : IREEGPU_I32MmaEnumAttr<"MMAIntrinsic",
def VMFMA_F32_16x16x32_F16 : I32EnumAttrCase<"VMFMA_F32_16x16x32_F16", 0>;
def VMFMA_F32_32x32x16_F16 : I32EnumAttrCase<"VMFMA_F32_32x32x16_F16", 1>;
def VMFMA_F32_16x16x32_F8E4M3FNUZ : I32EnumAttrCase<"VMFMA_F32_16x16x32_F8E4M3FNUZ", 2>;
def VMFMA_F32_32x32x16_F8E4M3FNUZ : I32EnumAttrCase<"VMFMA_F32_32x32x16_F8E4M3FNUZ", 3>;

def IREEGPU_VirtualMMAIntrinsic : IREEGPU_I32MmaEnumAttr<"VirtualMMAIntrinsic",
"Descriptor for different Virtual MMA intrinsics", [
VMFMA_F32_16x16x32_F16,
VMFMA_F32_32x32x16_F16,
VMFMA_F32_16x16x32_F8E4M3FNUZ,
VMFMA_F32_32x32x16_F8E4M3FNUZ,
]>;

def MMA_LHS : I32EnumAttrCase<"Lhs", 0>;
@@ -146,6 +146,10 @@ const WgpDetails *getCDNA3WgpDetails() {
MMAIntrinsic::MFMA_F32_16x16x32_F8E5M2FNUZ_F8E4M3FNUZ,
MMAIntrinsic::MFMA_F32_16x16x32_F8E4M3FNUZ,
MMAIntrinsic::MFMA_F32_16x16x32_F8E4M3FNUZ_F8E5M2FNUZ,
MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ,
MMAIntrinsic::MFMA_F32_32x32x16_F8E5M2FNUZ_F8E4M3FNUZ,
MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ,
MMAIntrinsic::MFMA_F32_32x32x16_F8E4M3FNUZ_F8E5M2FNUZ,
MMAIntrinsic::MFMA_I32_16x16x32_I8,
MMAIntrinsic::MFMA_I32_32x32x16_I8,
};