//===- MemoryPromotion.cpp - Utilities for moving data across GPU memories ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities that allow one to create IR moving the data
// across different levels of the GPU memory hierarchy.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/GPU/MemoryPromotion.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"

using namespace mlir;
using namespace mlir::gpu;

/// Returns the textual name of a GPU dimension.
static StringRef getDimName(unsigned dim) {
  if (dim == 0)
    return "x";
  if (dim == 1)
    return "y";
  if (dim == 2)
    return "z";

  llvm_unreachable("dimension ID overflow");
}

/// Emits the (imperfect) loop nest performing the copy between "from" and "to"
/// values using the bounds derived from the "from" value. Emits at least
/// GPUDialect::getNumWorkgroupDimensions() loops, completing the nest with
/// single-iteration loops. Maps the innermost loops to thread dimensions, in
/// reverse order to enable access coalescing in the innermost loop.
static void insertCopyLoops(ImplicitLocOpBuilder &b, Value from, Value to) {
  auto memRefType = from.getType().cast<MemRefType>();
  auto rank = memRefType.getRank();

  SmallVector<Value, 4> lbs, ubs, steps;
  Value zero = b.create<ConstantIndexOp>(0);
  Value one = b.create<ConstantIndexOp>(1);

  // Make sure we have enough loops to use all thread dimensions; these trivial
  // loops should be outermost and are therefore inserted first.
  if (rank < GPUDialect::getNumWorkgroupDimensions()) {
    unsigned extraLoops = GPUDialect::getNumWorkgroupDimensions() - rank;
    lbs.resize(extraLoops, zero);
    ubs.resize(extraLoops, one);
    steps.resize(extraLoops, one);
  }

  // Add existing bounds.
  lbs.append(rank, zero);
  ubs.reserve(lbs.size());
  steps.reserve(lbs.size());
  for (auto idx = 0; idx < rank; ++idx) {
    ubs.push_back(
        b.createOrFold<memref::DimOp>(from, b.create<ConstantIndexOp>(idx)));
    steps.push_back(one);
  }

  // Obtain thread identifiers and block sizes, necessary to map to them.
  auto indexType = b.getIndexType();
  SmallVector<Value, 3> threadIds, blockDims;
  for (unsigned i = 0; i < 3; ++i) {
    auto dimName = b.getStringAttr(getDimName(i));
    threadIds.push_back(b.create<gpu::ThreadIdOp>(indexType, dimName));
    blockDims.push_back(b.create<gpu::BlockDimOp>(indexType, dimName));
  }

  // Produce the loop nest with copies.
  SmallVector<Value, 8> ivs(lbs.size());
  mlir::scf::buildLoopNest(
      b, b.getLoc(), lbs, ubs, steps,
      [&](OpBuilder &b, Location loc, ValueRange loopIvs) {
        ivs.assign(loopIvs.begin(), loopIvs.end());
        auto activeIvs = llvm::makeArrayRef(ivs).take_back(rank);
        Value loaded = b.create<memref::LoadOp>(loc, from, activeIvs);
        b.create<memref::StoreOp>(loc, loaded, to, activeIvs);
      });

  // Map the innermost loops to threads in reverse order.
  for (auto en :
       llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back(
           GPUDialect::getNumWorkgroupDimensions())))) {
    Value v = en.value();
    auto loop = cast<scf::ForOp>(v.getParentRegion()->getParentOp());
    mapLoopToProcessorIds(loop, {threadIds[en.index()]},
                          {blockDims[en.index()]});
  }
}

/// Emits the loop nests performing the copy to the designated location in the
/// beginning of the region, and from the designated location immediately
/// before the terminator of the first block of the region. The region is
/// expected to have one block. This boils down to the following structure:
///
///   ^bb(...):
///     <loop-bound-computation>
///     for %arg0 = ... to ... step ... {
///       ...
///       for %argN = <thread-id-x> to ... step <block-dim-x> {
///         %0 = load %from[%arg0, ..., %argN]
///         store %0, %to[%arg0, ..., %argN]
///       }
///       ...
///     }
///     gpu.barrier
///     <... original body ...>
///     gpu.barrier
///     for %arg0 = ... to ... step ... {
///       ...
///       for %argN = <thread-id-x> to ... step <block-dim-x> {
///         %1 = load %to[%arg0, ..., %argN]
///         store %1, %from[%arg0, ..., %argN]
///       }
///       ...
///     }
///
/// Inserts the barriers unconditionally since different threads may be copying
/// values and reading them. An analysis would be required to eliminate
/// barriers in the case where a value is only used by the thread that copies
/// it. Both copies are inserted unconditionally; an analysis would be required
/// to only copy live-in and live-out values when necessary. This copies the
/// entire memref pointed to by "from". If a smaller block is sufficient, the
/// caller can create a subview of the memref and promote that instead.
static void insertCopies(Region &region, Location loc, Value from, Value to) {
  auto fromType = from.getType().cast<MemRefType>();
  auto toType = to.getType().cast<MemRefType>();
  (void)fromType;
  (void)toType;
  assert(fromType.getShape() == toType.getShape());
  assert(fromType.getRank() != 0);
  assert(llvm::hasSingleElement(region) &&
         "unstructured control flow not supported");

  auto b = ImplicitLocOpBuilder::atBlockBegin(loc, &region.front());
  insertCopyLoops(b, from, to);
  b.create<gpu::BarrierOp>();

  b.setInsertionPoint(&region.front().back());
  b.create<gpu::BarrierOp>();
  insertCopyLoops(b, to, from);
}

/// Promotes a function argument to workgroup memory in the given function. The
/// copies will be inserted at the beginning and at the end of the function.
void mlir::promoteToWorkgroupMemory(GPUFuncOp op, unsigned arg) {
  Value value = op.getArgument(arg);
  auto type = value.getType().dyn_cast<MemRefType>();
  assert(type && type.hasStaticShape() &&
         "can only promote statically-shaped memrefs");

  // Get the type of the buffer in the workgroup memory.
  int workgroupMemoryAddressSpace = gpu::GPUDialect::getWorkgroupAddressSpace();
  auto bufferType = MemRefType::get(type.getShape(), type.getElementType(), {},
                                    workgroupMemoryAddressSpace);

  Value attribution = op.addWorkgroupAttribution(bufferType);

  // Replace the uses first since only the original uses are currently present.
  // Then insert the copies.
  value.replaceAllUsesWith(attribution);
  insertCopies(op.getBody(), op.getLoc(), value, attribution);
}
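
// A minimal usage sketch, not part of the upstream utility: a test-style pass
// that promotes every statically-shaped memref argument of a gpu.func to
// workgroup memory. The class name below is hypothetical; MLIR's in-tree test
// pass for this utility instead selects arguments via a per-argument
// attribute. promoteToWorkgroupMemory asserts on non-memref and
// dynamically-shaped arguments, hence the explicit type check before the call.
namespace {
struct PromoteAllArgsToWorkgroupPass
    : public PassWrapper<PromoteAllArgsToWorkgroupPass,
                         OperationPass<gpu::GPUFuncOp>> {
  void runOnOperation() override {
    gpu::GPUFuncOp func = getOperation();
    // Promotion appends workgroup attributions after the existing function
    // arguments, so iterating over the original argument indices stays valid.
    for (unsigned i = 0, e = func.getNumArguments(); i < e; ++i) {
      auto type = func.getArgument(i).getType().dyn_cast<MemRefType>();
      if (type && type.hasStaticShape())
        promoteToWorkgroupMemory(func, i);
    }
  }
};
} // namespace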