// RUN: mlir-opt -convert-to-spirv -cse %s | FileCheck %s

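// Lowering of a GPU argmax kernel to SPIR-V: each lane scans the input with a
// workgroup-size stride, a subgroup reduction and ballot then locate the
// maximum across lanes, and a single lane stores the resulting index.
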
module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader, Groups, GroupNonUniformArithmetic, GroupNonUniformBallot], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
} {
  // CHECK-LABEL: spirv.module @{{.*}} Logical GLSL450
  // CHECK-DAG: spirv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spirv.ptr<vector<3xi32>, Input>
  // CHECK-LABEL: spirv.func @argmax
  // CHECK-SAME: %[[ARG0:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>
  // CHECK-SAME: %[[ARG1:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>
  gpu.module @kernels {
    gpu.func @argmax(%input : memref<4xf32>, %output : memref<i32>) kernel
      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [32, 1, 1]>} {
      // CHECK: %[[C0:.*]] = spirv.Constant 0 : i32
      // CHECK: %[[C1:.*]] = spirv.Constant 1 : i32
      // CHECK: %[[C32:.*]] = spirv.Constant 32 : i32
      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spirv.mlir.addressof @[[$LOCALINVOCATIONIDVAR]]
      // CHECK: %[[LOCALINVOCATIONID:.*]] = spirv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
      // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spirv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
      // CHECK: %[[AC0:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[LOCALINVOCATIONIDX]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
      // CHECK: %[[LOAD0:.*]] = spirv.Load "StorageBuffer" %[[AC0]] : f32
      // CHECK: %[[FUNC0:.*]] = spirv.Variable : !spirv.ptr<i32, Function>
      // CHECK: %[[FUNC1:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
      %cst_0_idx = arith.constant 0 : index
      %cst_1_i32 = arith.constant 1 : i32
      %cst_1_idx = arith.constant 1 : index
      %cst_32 = arith.constant 32 : i32
      %num_batches = arith.divui %cst_1_i32, %cst_32 : i32
      %tx = gpu.thread_id x
      %tx_i32 = index.castu %tx : index to i32
      %ub = index.castu %num_batches : i32 to index
      %lane_res_init = arith.constant 0 : i32
      %lane_max_init = memref.load %input[%tx] : memref<4xf32>

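      // Per-lane scan: walk the input with a stride equal to the workgroup
      // size (32), carrying the running maximum and its index as iter_args.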
      // CHECK: spirv.mlir.loop {
      // CHECK: spirv.Branch ^[[HEADER:.*]](%[[C1]], %[[C0]], %[[LOAD0]] : i32, i32, f32)
      // CHECK: ^[[HEADER]](%[[INDVAR0:.*]]: i32, %[[INDVAR1:.*]]: i32, %[[INDVAR2:.*]]: f32):
      // CHECK: %[[SLT:.*]] = spirv.SLessThan %[[INDVAR0]], %[[C0]] : i32
      // CHECK: spirv.BranchConditional %[[SLT]], ^[[BODY:.*]], ^[[MERGE:.*]]
      // CHECK: ^[[BODY]]:
      // CHECK: %[[MUL:.*]] = spirv.IMul %[[INDVAR0]], %[[C32]] : i32
      // CHECK: %[[ADD:.*]] = spirv.IAdd %[[MUL]], %[[LOCALINVOCATIONIDX]] : i32
      // CHECK: %[[AC1:.*]] = spirv.AccessChain %[[ARG0]][%[[C0]], %[[ADD]]] : !spirv.ptr<!spirv.struct<(!spirv.array<4 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
      // CHECK: %[[LOAD1:.*]] = spirv.Load "StorageBuffer" %[[AC1]] : f32
      // CHECK: %[[OGT:.*]] = spirv.FOrdGreaterThan %[[LOAD1]], %[[INDVAR2]] : f32
      // CHECK: %[[SELECT0:.*]] = spirv.Select %[[OGT]], %[[ADD]], %[[INDVAR1]] : i1, i32
      // CHECK: %[[SELECT1:.*]] = spirv.Select %[[OGT]], %[[LOAD1]], %[[INDVAR2]] : i1, f32
      // CHECK: spirv.Store "Function" %[[FUNC0]], %[[SELECT0]] : i32
      // CHECK: spirv.Store "Function" %[[FUNC1]], %[[SELECT1]] : f32
      // CHECK: %[[ADD1:.*]] = spirv.IAdd %[[INDVAR0]], %[[C1]] : i32
      // CHECK: spirv.Branch ^[[HEADER]](%[[ADD1]], %[[SELECT0]], %[[SELECT1]] : i32, i32, f32)
      // CHECK: ^[[MERGE]]:
      // CHECK: spirv.mlir.merge
      // CHECK: }
      // CHECK-DAG: %[[LANE_RES:.*]] = spirv.Load "Function" %[[FUNC0]] : i32
      // CHECK-DAG: %[[LANE_MAX:.*]] = spirv.Load "Function" %[[FUNC1]] : f32
      %lane_res, %lane_max = scf.for %iter = %cst_1_idx to %ub step %cst_1_idx
          iter_args(%lane_res_iter = %lane_res_init, %lane_max_iter = %lane_max_init) -> (i32, f32) {
        %iter_i32 = index.castu %iter : index to i32
        %mul = arith.muli %cst_32, %iter_i32 : i32
        %idx_i32 = arith.addi %mul, %tx_i32 : i32
        %idx = index.castu %idx_i32 : i32 to index
        %elem = memref.load %input[%idx] : memref<4xf32>
        %gt = arith.cmpf ogt, %elem, %lane_max_iter : f32
        %lane_res_next = arith.select %gt, %idx_i32, %lane_res_iter : i32
        %lane_max_next = arith.select %gt, %elem, %lane_max_iter : f32
        scf.yield %lane_res_next, %lane_max_next : i32, f32
      }

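      // Cross-lane step: reduce the per-lane maxima over the subgroup, ballot
      // the lanes whose local maximum matches it, and take the lowest set bit.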
      // CHECK: %[[SUBGROUP_MAX:.*]] = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[LANE_MAX]] : f32
      // CHECK: %[[OEQ:.*]] = spirv.FOrdEqual %[[LANE_MAX]], %[[SUBGROUP_MAX]] : f32
      // CHECK: %[[BALLOT:.*]] = spirv.GroupNonUniformBallot <Subgroup> %[[OEQ]] : vector<4xi32>
      // CHECK: %[[BALLOTLSB:.*]] = spirv.GroupNonUniformBallotFindLSB <Subgroup> %[[BALLOT]] : vector<4xi32>, i32
      // CHECK: %[[EQ:.*]] = spirv.IEqual %[[LOCALINVOCATIONIDX]], %[[C1]] : i32
      %subgroup_max = gpu.subgroup_reduce maximumf %lane_max : (f32) -> (f32)
      %eq = arith.cmpf oeq, %lane_max, %subgroup_max : f32
      %ballot = spirv.GroupNonUniformBallot <Subgroup> %eq : vector<4xi32>
      %lsb = spirv.GroupNonUniformBallotFindLSB <Subgroup> %ballot : vector<4xi32>, i32
      %cond = arith.cmpi eq, %cst_1_i32, %tx_i32 : i32

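      // Guarded store: a single lane writes its argmax index to the output.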
      // CHECK: spirv.mlir.selection {
      // CHECK: spirv.BranchConditional %[[EQ]], ^[[TRUE:.*]], ^[[FALSE:.*]]
      // CHECK: ^[[TRUE]]:
      // CHECK: %[[AC2:.*]] = spirv.AccessChain %[[ARG1]][%[[C0]], %[[C0]]] : !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>, i32, i32
      // CHECK: spirv.Store "StorageBuffer" %[[AC2]], %[[LANE_RES]] : i32
      // CHECK: spirv.Branch ^[[FALSE]]
      // CHECK: ^[[FALSE]]:
      // CHECK: spirv.mlir.merge
      // CHECK: }
      scf.if %cond {
        memref.store %lane_res, %output[] : memref<i32>
      }

      // CHECK: spirv.Return
      gpu.return
    }
  }
}