// RUN: mlir-opt %s --transform-interpreter --split-input-file -canonicalize | FileCheck %s

// Demonstrates what happens when the middle loop (the 2nd parallel dimension)
// is peeled and then vectorized in the presence of _scalable_ vectors
// (introduced through scalable tiling). The main goal is to verify that
// canonicalizations fold away the masks in the main loop.
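
// As a rough sketch (SSA names below are illustrative only, not what
// FileCheck matches), the loop nest expected after tiling, peeling and
// vectorization is:
//
//   scf.for %i = %c0 to %c1024 step %c8 {                 // dim 0, fixed tile of 8
//     %ub_main = affine.apply #map()[%step]               // 2000 - (2000 mod %step)
//     scf.for %j = %c0 to %ub_main step %step { ... }     // main loop, no masks
//     scf.for %j = %ub_main to %c2000 step %step { ... }  // remainder loop, masked
//   }
//
// where %step = 16 * vector.vscale, so the main loop's trip count is an exact
// multiple of the (scalable) vector length.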

func.func @matmul(%A: tensor<1024x512xf32>,
                  %B: tensor<512x2000xf32>,
                  %C: tensor<1024x2000xf32>) -> tensor<1024x2000xf32> {

// CHECK: #[[MAP:.*]] = affine_map<()[s0] -> (-(2000 mod s0) + 2000)>
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2000:.*]] = arith.constant 2000 : index
// CHECK-DAG: %[[C8:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[C1024:.*]] = arith.constant 1024 : index
// CHECK-DAG: %[[C512:.*]] = arith.constant 512 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
// CHECK: %[[VSCALE:.*]] = vector.vscale
// CHECK: %[[STEP:.*]] = arith.muli %[[VSCALE]], %[[C16]] : index
// CHECK: scf.for {{.*}} %[[C0]] to %[[C1024]] step %[[C8]] iter_args({{.*}}) -> (tensor<1024x2000xf32>) {

// Main loop after vectorization (without masking)

// CHECK: %[[UB_MAIN:.*]] = affine.apply #[[MAP]]()[%[[STEP]]]
// CHECK: scf.for {{.*}} %[[C0]] to %[[UB_MAIN]] step %[[STEP]] {{.*}} -> (tensor<1024x2000xf32>) {
// CHECK: scf.for {{.*}} %[[C0]] to %[[C512]] step %[[C1]] {{.*}} -> (tensor<1024x2000xf32>) {
// CHECK-NOT: vector.mask
// CHECK: arith.mulf {{.*}} : vector<8x[16]x1xf32>
// CHECK-NEXT: vector.shape_cast {{.*}} : vector<8x[16]x1xf32> to vector<8x[16]xf32>
// CHECK-NEXT: arith.addf {{.*}} : vector<8x[16]xf32>
// CHECK-NOT: vector.mask
// CHECK: scf.yield {{.*}} : tensor<1024x2000xf32>
// CHECK-NEXT: }
// CHECK-NEXT: scf.yield {{.*}} : tensor<1024x2000xf32>
// CHECK-NEXT: }
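
// The masks fold away in the main loop because its upper bound %[[UB_MAIN]] is
// an exact multiple of %[[STEP]] (= 16 * vscale): the vectorizer's
// `vector.create_mask` is fed a bound equal to the full scalable vector
// length, which canonicalizes to an all-true mask, and
// `vector.mask %all_true { op }` then folds to plain `op`. Sketch:
//
//   %m = vector.create_mask %step : vector<[16]xi1>   // all-true when %step = 16 * vscale
//   vector.mask %m { ... }                            // folds to the masked op itself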

// Remainder loop after vectorization (with masking)

// CHECK: scf.for {{.*}} %[[UB_MAIN]] to %[[C2000]] step %[[STEP]] {{.*}} -> (tensor<1024x2000xf32>) {
// CHECK: scf.for {{.*}} %[[C0]] to %[[C512]] step %[[C1]] {{.*}} -> (tensor<1024x2000xf32>) {
// CHECK: %[[MASK_1:.*]] = vector.create_mask {{.*}} : vector<1x[16]xi1>
// CHECK: %[[RHS:.*]] = vector.mask %[[MASK_1]] { vector.transfer_read {{.*}} } : vector<1x[16]xi1> -> vector<8x[16]x1xf32>
// CHECK: %[[MASK_2:.*]] = vector.create_mask {{.*}} : vector<8x[16]xi1>
// CHECK: %[[LHS:.*]] = vector.mask %[[MASK_2]] { vector.transfer_read {{.*}} } : vector<8x[16]xi1> -> vector<8x[16]xf32>
// CHECK: %[[MUL:.*]] = arith.mulf %{{.*}}, %[[RHS]] : vector<8x[16]x1xf32>
// CHECK: %[[MASK_3:.*]] = vector.create_mask {{.*}} : vector<8x[16]xi1>
// CHECK: vector.shape_cast %[[MUL]] : vector<8x[16]x1xf32> to vector<8x[16]xf32>
// CHECK: arith.addf %[[LHS]], %{{.*}} : vector<8x[16]xf32>
// CHECK: arith.select %[[MASK_3]], {{.*}} : vector<8x[16]xi1>, vector<8x[16]xf32>
// CHECK: vector.mask %[[MASK_2]] { vector.transfer_write {{.*}} } : vector<8x[16]xi1> -> tensor<8x?xf32>
// CHECK: scf.yield {{.*}} : tensor<1024x2000xf32>
// CHECK: }
// CHECK: scf.yield {{.*}} : tensor<1024x2000xf32>
// CHECK: }
// CHECK: scf.yield {{.*}} : tensor<1024x2000xf32>
// CHECK-NEXT: }
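
// In the remainder loop the trip count (2000 mod %[[STEP]]) is not a multiple
// of the vector length, so the masks survive: the transfer_read/transfer_write
// ops stay wrapped in `vector.mask`, and the accumulator update is guarded by
// `arith.select %[[MASK_3]]` so that masked-off lanes keep the previous
// accumulator values instead of reading past the partial tile.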

  %res = linalg.matmul ins(%A, %B: tensor<1024x512xf32>, tensor<512x2000xf32>)
                       outs(%C: tensor<1024x2000xf32>) -> tensor<1024x2000xf32>
  return %res : tensor<1024x2000xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%root: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %root : (!transform.any_op) -> !transform.any_op
    // 1. Scalable tiling
    %_, %loop_1, %loop_2, %loop_3 =
      transform.structured.tile_using_for %matmul [8, [16], 1] : (!transform.any_op)
        -> (!transform.any_op, !transform.op<"scf.for">, !transform.op<"scf.for">, !transform.op<"scf.for">)
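    // Note: the bracketed [16] requests a _scalable_ tile size, i.e. the tile
    // along the 2nd parallel dimension is 16 * vector.vscale elements. This is
    // what materializes the vscale-based loop step checked above.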

    // 2. Loop peeling (only the middle dimension)
    %main_loop, %remainder_loop = transform.loop.peel %loop_2 : (!transform.op<"scf.for">) -> (!transform.op<"scf.for">, !transform.op<"scf.for">)
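    // Peeling splits %loop_2 at the largest multiple of its step not exceeding
    // the upper bound (2000 - 2000 mod %step, i.e. #[[MAP]] above), leaving a
    // main loop with a step-aligned trip count and a remainder loop for the
    // leftover iterations.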

    // 3. Vectorize the main loop
    %matmul_main = transform.structured.match ops{["linalg.matmul"]} in %main_loop : (!transform.op<"scf.for">) -> !transform.any_op
    transform.structured.vectorize %matmul_main vector_sizes [8, [16], 1] : !transform.any_op
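    // The vector sizes match the tile sizes, so every vector in the main loop
    // is exactly full and the masks created during vectorization fold away
    // (verified by the CHECK-NOT lines above).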

    // 4. Vectorize the remainder loop
    %matmul_remainder = transform.structured.match ops{["linalg.matmul"]} in %remainder_loop : (!transform.op<"scf.for">) -> !transform.any_op
    transform.structured.vectorize %matmul_remainder vector_sizes [8, [16], 1] : !transform.any_op

    transform.yield
  }
}