//===- AffineDataCopyGeneration.cpp - Explicit memref copying pass ------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to automatically promote accessed memref regions
// to buffers in a faster memory space that is explicitly managed, with the
// necessary data movement operations performed through either regular
// point-wise loads/stores or DMAs. Such explicit copying (also referred to as
// array packing/unpacking in the literature), when done on arrays that exhibit
// reuse, results in near elimination of conflict misses, TLB misses, reduced
// use of hardware prefetch streams, and reduced false sharing. It is also
// necessary for hardware with explicitly managed levels in the memory
// hierarchy, where DMAs may have to be used. This optimization is often
// performed on already tiled code.
//
//===----------------------------------------------------------------------===//

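// As a rough illustration (a hand-written sketch, not IR reproduced from this
// pass's output or tests), consider a memref %A living in the slow memory
// space:
//
//   affine.for %i = 0 to 4096 step 128 {
//     affine.for %ii = affine_map<(d0) -> (d0)>(%i)
//         to affine_map<(d0) -> (d0 + 128)>(%i) {
//       %v = affine.load %A[%ii] : memref<4096xf32>
//       "use"(%v) : (f32) -> ()
//     }
//   }
//
// Conceptually, the pass allocates a 128-element buffer in the fast memory
// space, copies the accessed region of %A into it ahead of the inner loop
// (via point-wise copy loops or a DMA, depending on the options), and
// redirects the affine.load to index into the fast buffer; stores, when
// present, are copied back out symmetrically after the loop.
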
#include "PassDetail.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "affine-data-copy-generate"

using namespace mlir;

namespace {

/// Replaces all loads and stores on memref's living in 'slowMemorySpace' by
/// introducing copy operations to transfer data into `fastMemorySpace` and
/// rewriting the original loads/stores to instead load/store from the
/// allocated fast memory buffers. Additional options specify the identifier
/// corresponding to the fast memory space and the amount of fast memory space
/// available. The pass traverses through the nesting structure, recursing to
/// inner levels if necessary to determine at what depth copies need to be
/// placed so that the allocated buffers fit within the memory capacity
/// provided.
// TODO: We currently can't generate copies correctly when stores
// are strided. Check for strided stores.
struct AffineDataCopyGeneration
    : public AffineDataCopyGenerationBase<AffineDataCopyGeneration> {
  AffineDataCopyGeneration() = default;
  explicit AffineDataCopyGeneration(unsigned slowMemorySpace,
                                    unsigned fastMemorySpace,
                                    unsigned tagMemorySpace,
                                    int minDmaTransferSize,
                                    uint64_t fastMemCapacityBytes) {
    this->slowMemorySpace = slowMemorySpace;
    this->fastMemorySpace = fastMemorySpace;
    this->tagMemorySpace = tagMemorySpace;
    this->minDmaTransferSize = minDmaTransferSize;
    // The pass option stores the capacity in KiB.
    this->fastMemoryCapacity = fastMemCapacityBytes / 1024;
  }

  void runOnFunction() override;
  LogicalResult runOnBlock(Block *block, DenseSet<Operation *> &copyNests);

  // Constant zero index to avoid too many duplicates.
  Value zeroIndex = nullptr;
};

} // end anonymous namespace

/// Generates copies for memref's living in 'slowMemorySpace' into newly
/// created buffers in 'fastMemorySpace', and replaces memory operations to the
/// former by the latter. Only load ops are handled for now.
/// TODO: extend this to store ops.
std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
    unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
    int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
  return std::make_unique<AffineDataCopyGeneration>(
      slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
      fastMemCapacityBytes);
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createAffineDataCopyGenerationPass() {
  return std::make_unique<AffineDataCopyGeneration>();
}

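// A minimal construction sketch (assuming a standard mlir::PassManager setup;
// the memory space identifiers and sizes below are illustrative values, not
// defaults of this pass):
//
//   PassManager pm(&context);
//   pm.addNestedPass<FuncOp>(createAffineDataCopyGenerationPass(
//       /*slowMemorySpace=*/0, /*fastMemorySpace=*/1, /*tagMemorySpace=*/2,
//       /*minDmaTransferSize=*/1024,
//       /*fastMemCapacityBytes=*/32 * 1024));
//   (void)pm.run(module);
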
/// Generate copies for this block. The block is partitioned into separate
/// ranges: each range is either a sequence of one or more operations starting
/// and ending with an affine load or store op, or just an affine.for op (which
/// could have other affine.for ops nested within).
LogicalResult
AffineDataCopyGeneration::runOnBlock(Block *block,
                                     DenseSet<Operation *> &copyNests) {
  if (block->empty())
    return success();

  uint64_t fastMemCapacityBytes =
      fastMemoryCapacity != std::numeric_limits<uint64_t>::max()
          ? fastMemoryCapacity * 1024
          : fastMemoryCapacity;
  AffineCopyOptions copyOptions = {generateDma, slowMemorySpace,
                                   fastMemorySpace, tagMemorySpace,
                                   fastMemCapacityBytes};

  // Every affine.for op in the block starts and ends a block range for
  // copying; in addition, a contiguous sequence of operations starting with a
  // load/store op but not including any copy nests themselves is also
  // identified as a copy block range. Straight-line code (a contiguous chunk
  // of operations excluding AffineForOp's) is always assumed to not exhaust
  // memory. As a result, this approach is conservative in some cases at the
  // moment; we do a check later and report an error with location info.
  // TODO: An 'affine.if' operation is being treated similarly to an
  // operation. 'affine.if''s could have 'affine.for's in them;
  // treat them separately.

  // Get to the first load, store, or for op (that is not a copy nest itself).
  auto curBegin =
      std::find_if(block->begin(), block->end(), [&](Operation &op) {
        return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
               copyNests.count(&op) == 0;
      });

  // Create [begin, end) ranges.
  auto it = curBegin;
  while (it != block->end()) {
    AffineForOp forOp;
    // If we hit a non-copy for loop, we will split there.
    if ((forOp = dyn_cast<AffineForOp>(&*it)) && copyNests.count(forOp) == 0) {
      // Perform the copying up until this 'for' op first.
      affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/it, copyOptions,
                             /*filterMemRef=*/llvm::None, copyNests);

      // Returns true if the footprint is known to exceed capacity.
      auto exceedsCapacity = [&](AffineForOp forOp) {
        Optional<int64_t> footprint =
            getMemoryFootprintBytes(forOp,
                                    /*memorySpace=*/0);
        return (footprint.hasValue() &&
                static_cast<uint64_t>(footprint.getValue()) >
                    fastMemCapacityBytes);
      };

      // If the memory footprint of the 'affine.for' loop is higher than the
      // fast memory capacity (when provided), we recurse to copy at an inner
      // level until we find a depth at which the footprint fits in the fast
      // memory capacity. If the footprint can't be calculated, we assume for
      // now that it fits. Recurse inside if the footprint for 'forOp' exceeds
      // capacity, or when skipNonUnitStrideLoops is set and the step size is
      // not one.
      bool recurseInner = skipNonUnitStrideLoops ? forOp.getStep() != 1
                                                 : exceedsCapacity(forOp);
      if (recurseInner) {
        // We'll recurse and do the copies at an inner level for 'forOp'.
        // Recurse onto the body of this loop.
        (void)runOnBlock(forOp.getBody(), copyNests);
      } else {
        // We have enough capacity, i.e., copies will be computed for the
        // portion of the block until 'it', and for 'it', which is 'forOp'.
        // Note that for the latter, the copies are placed just before this
        // loop (for incoming copies) and right after (for outgoing ones).

        // Inner loop copies have their own scope - we thus don't update
        // consumed capacity. The footprint check above guarantees that this
        // inner loop's footprint fits.
        affineDataCopyGenerate(/*begin=*/it, /*end=*/std::next(it), copyOptions,
                               /*filterMemRef=*/llvm::None, copyNests);
      }
      // Get to the next load or store op after 'forOp'.
      curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {
        return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
               copyNests.count(&op) == 0;
      });
      it = curBegin;
    } else {
      assert(copyNests.count(&*it) == 0 &&
             "all copy nests generated should have been skipped above");
      // We simply include this op in the current range and continue for more.
      ++it;
    }
  }

  // Generate the copy for the final block range.
  if (curBegin != block->end()) {
    // Can't be a terminator because it would have been skipped above.
    assert(!curBegin->hasTrait<OpTrait::IsTerminator>() &&
           "can't be a terminator");
    // Exclude the affine.yield - hence, the std::prev.
    affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/std::prev(block->end()),
                           copyOptions, /*filterMemRef=*/llvm::None, copyNests);
  }

  return success();
}

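// For intuition, a hypothetical block body such as:
//
//   %0 = affine.load ...  // starts copy range 1
//   affine.store ...      // still in range 1
//   affine.for ... {...}  // ends range 1; the loop by itself forms range 2
//   "foo"() : () -> ()    // not a load/store/for: skipped as a range start
//   %1 = affine.load ...  // starts the final range
//   affine.yield          // terminator; excluded via the std::prev above
//
// is partitioned into three [begin, end) ranges, with affineDataCopyGenerate
// invoked once per range. For the affine.for range, the copies are placed
// just before and just after the loop, or at an inner depth if its footprint
// exceeds the fast memory capacity.
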
void AffineDataCopyGeneration::runOnFunction() {
  FuncOp f = getFunction();
  OpBuilder topBuilder(f.getBody());
  zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);

  // Nests that are copy-in's or copy-out's; the root AffineForOps of those
  // nests are stored herein.
  DenseSet<Operation *> copyNests;

  // Clear recorded copy nests.
  copyNests.clear();

  for (auto &block : f)
    (void)runOnBlock(&block, copyNests);

  // Promote any single iteration loops in the copy nests and collect
  // load/stores to simplify.
  SmallVector<Operation *, 4> copyOps;
  for (Operation *nest : copyNests)
    // With a post order walk, the erasure of loops does not affect
    // continuation of the walk or the collection of load/store ops.
    nest->walk([&](Operation *op) {
      if (auto forOp = dyn_cast<AffineForOp>(op))
        (void)promoteIfSingleIteration(forOp);
      else if (isa<AffineLoadOp, AffineStoreOp>(op))
        copyOps.push_back(op);
    });

  // Promoting single iteration loops could lead to simplification of
  // contained loads/stores, and the latter could anyway also be
  // canonicalized.
  RewritePatternSet patterns(&getContext());
  AffineLoadOp::getCanonicalizationPatterns(patterns, &getContext());
  AffineStoreOp::getCanonicalizationPatterns(patterns, &getContext());
  FrozenRewritePatternSet frozenPatterns(std::move(patterns));
  (void)applyOpPatternsAndFold(copyOps, frozenPatterns, /*strict=*/true);
}

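// Example textual invocation (a sketch: the pass flag matches DEBUG_TYPE
// above; the option names are assumed to match those registered in the pass's
// tablegen definition, with fast-mem-capacity given in KiB):
//
//   mlir-opt -affine-data-copy-generate="generate-dma=false fast-mem-space=1 fast-mem-capacity=32" input.mlir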