//===- MemoryPromotion.cpp - Utilities for moving data across GPU memories ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for creating IR that moves data across
// different levels of the GPU memory hierarchy.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/GPU/MemoryPromotion.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/EDSC/Helpers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Transforms/LoopUtils.h"

using namespace mlir;
using namespace mlir::gpu;

/// Returns the textual name of a GPU dimension.
static StringRef getDimName(unsigned dim) {
  if (dim == 0)
    return "x";
  if (dim == 1)
    return "y";
  if (dim == 2)
    return "z";

  llvm_unreachable("dimension ID overflow");
}

/// Emits the (imperfect) loop nest performing the copy between "from" and "to"
/// values using the bounds derived from the "from" value. Emits at least
/// GPUDialect::getNumWorkgroupDimensions() loops, completing the nest with
/// single-iteration loops. Maps the innermost loops to thread dimensions, in
/// reverse order to enable access coalescing in the innermost loop.
static void insertCopyLoops(OpBuilder &builder, Location loc,
                            edsc::MemRefView &bounds, Value from, Value to) {
  // Create EDSC handles for the bounds.
  unsigned rank = bounds.rank();
  SmallVector<edsc::ValueHandle, 4> lbs, ubs, steps;

  // Make sure we have enough loops to use all thread dimensions; these trivial
  // loops should be outermost and are therefore inserted first.
  if (rank < GPUDialect::getNumWorkgroupDimensions()) {
    unsigned extraLoops = GPUDialect::getNumWorkgroupDimensions() - rank;
    edsc::ValueHandle zero = edsc::intrinsics::constant_index(0);
    edsc::ValueHandle one = edsc::intrinsics::constant_index(1);
    lbs.resize(extraLoops, zero);
    ubs.resize(extraLoops, one);
    steps.resize(extraLoops, one);
  }

  // Add the existing bounds.
  lbs.append(bounds.getLbs().begin(), bounds.getLbs().end());
  ubs.append(bounds.getUbs().begin(), bounds.getUbs().end());

  // Emit constant operations for the steps.
  steps.reserve(lbs.size());
  llvm::transform(
      bounds.getSteps(), std::back_inserter(steps),
      [](int64_t step) { return edsc::intrinsics::constant_index(step); });

  // Obtain thread identifiers and block sizes, necessary to map loops to them.
  auto indexType = builder.getIndexType();
  SmallVector<Value, 3> threadIds, blockDims;
  for (unsigned i = 0; i < 3; ++i) {
    auto dimName = builder.getStringAttr(getDimName(i));
    threadIds.push_back(
        builder.create<gpu::ThreadIdOp>(loc, indexType, dimName));
    blockDims.push_back(
        builder.create<gpu::BlockDimOp>(loc, indexType, dimName));
  }

  // Produce the loop nest with copies.
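  // Only the innermost `rank` induction variables index the memrefs below;
  // the leading single-iteration loops exist solely so that every workgroup
  // dimension has a loop that can be mapped onto it.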
  auto ivs = edsc::makeIndexHandles(lbs.size());
  auto ivPtrs =
      edsc::makeHandlePointers(MutableArrayRef<edsc::IndexHandle>(ivs));
  edsc::LoopNestBuilder(ivPtrs, lbs, ubs, steps)([&]() {
    auto activeIvs = llvm::makeArrayRef(ivs).take_back(rank);
    edsc::StdIndexedValue fromHandle(from), toHandle(to);
    toHandle(activeIvs) = fromHandle(activeIvs);
  });

  // Map the innermost loops to threads in reverse order.
  for (auto en :
       llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back(
           GPUDialect::getNumWorkgroupDimensions())))) {
    auto loop = cast<loop::ForOp>(
        en.value().getValue().getParentRegion()->getParentOp());
    mapLoopToProcessorIds(loop, {threadIds[en.index()]},
                          {blockDims[en.index()]});
  }
}

/// Emits the loop nests performing the copy to the designated location at the
/// beginning of the region, and from the designated location immediately
/// before the terminator of the first block of the region. The region is
/// expected to have one block. This boils down to the following structure:
///
/// ^bb(...):
///   <loop-bound-computation>
///   for %arg0 = ... to ... step ... {
///     ...
///     for %argN = <thread-id-x> to ... step <block-dim-x> {
///       %0 = load %from[%arg0, ..., %argN]
///       store %0, %to[%arg0, ..., %argN]
///     }
///     ...
///   }
///   gpu.barrier
///   <... original body ...>
///   gpu.barrier
///   for %arg0 = ... to ... step ... {
///     ...
///     for %argN = <thread-id-x> to ... step <block-dim-x> {
///       %1 = load %to[%arg0, ..., %argN]
///       store %1, %from[%arg0, ..., %argN]
///     }
///     ...
///   }
///
/// Inserts the barriers unconditionally since different threads may be copying
/// values and reading them. An analysis would be required to eliminate
/// barriers in the case where a value is only used by the thread that copies
/// it. Both copies are inserted unconditionally; an analysis would be required
/// to only copy live-in and live-out values when necessary. This copies the
/// entire memref pointed to by "from". In case a smaller block would be
/// sufficient, the caller can create a subview of the memref and promote it
/// instead.
static void insertCopies(Region &region, Location loc, Value from, Value to) {
  auto fromType = from.getType().cast<MemRefType>();
  auto toType = to.getType().cast<MemRefType>();
  (void)fromType;
  (void)toType;
  assert(fromType.getShape() == toType.getShape());
  assert(fromType.getRank() != 0);
  assert(has_single_element(region) &&
         "unstructured control flow not supported");

  OpBuilder builder(region.getContext());
  builder.setInsertionPointToStart(&region.front());

  edsc::ScopedContext edscContext(builder, loc);
  edsc::MemRefView fromView(from);
  insertCopyLoops(builder, loc, fromView, from, to);
  builder.create<gpu::BarrierOp>(loc);

  builder.setInsertionPoint(&region.front().back());
  builder.create<gpu::BarrierOp>(loc);
  insertCopyLoops(builder, loc, fromView, to, from);
}

/// Promotes a function argument to workgroup memory in the given function. The
/// copies will be inserted at the beginning and the end of the function.
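///
/// For illustration, a sketch (not verbatim output) of promoting argument 0 of
///
///   gpu.func @f(%arg0: memref<4x4xf32>) { <body> }
///
/// would be
///
///   gpu.func @f(%arg0: memref<4x4xf32>)
///       workgroup(%promoted: memref<4x4xf32, 3>) {
///     <copy loops from %arg0 to %promoted>
///     gpu.barrier
///     <body, now using %promoted>
///     gpu.barrier
///     <copy loops from %promoted back to %arg0>
///   }
///
/// assuming the GPU dialect's workgroup address space is 3.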
void mlir::promoteToWorkgroupMemory(GPUFuncOp op, unsigned arg) {
  Value value = op.getArgument(arg);
  auto type = value.getType().dyn_cast<MemRefType>();
  assert(type && type.hasStaticShape() &&
         "can only promote statically shaped memrefs");

  Value attribution =
      op.addWorkgroupAttribution(type.getShape(), type.getElementType());

  // Replace the uses first since only the original uses are currently present.
  // Then insert the copies.
  value.replaceAllUsesWith(attribution);
  insertCopies(op.getBody(), op.getLoc(), value, attribution);
}
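
// Example usage (a sketch; the `module` value and the surrounding pass
// boilerplate are hypothetical, not part of this file): promote the first
// argument of every gpu.func whose first argument is a statically shaped
// memref.
//
//   module.walk([](gpu::GPUFuncOp func) {
//     if (func.getNumArguments() == 0)
//       return;
//     auto type = func.getArgument(0).getType().dyn_cast<MemRefType>();
//     if (type && type.hasStaticShape())
//       promoteToWorkgroupMemory(func, 0);
//   });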