//===- AffineDataCopyGeneration.cpp - Explicit memref copying pass -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to automatically promote accessed memref regions
// to buffers in a faster memory space that is explicitly managed, with the
// necessary data movement operations performed through either regular
// point-wise loads/stores or DMAs. Such explicit copying (also referred to as
// array packing/unpacking in the literature), when done on arrays that exhibit
// reuse, results in near elimination of conflict misses and TLB misses,
// reduced use of hardware prefetch streams, and reduced false sharing. It is
// also necessary for hardware with explicitly managed levels in the memory
// hierarchy, where DMAs may have to be used. This optimization is often
// performed on already tiled code.
//
//===----------------------------------------------------------------------===//
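// As a rough illustration (a hypothetical before/after sketch, not verbatim
// output of this pass; the shapes and the fast memory space id 2 are made up),
// a loop reading a memref %A that lives in the slow (default) memory space:
//
//   affine.for %i = 0 to 256 {
//     %v = affine.load %A[%i] : memref<256xf32>
//   }
//
// is rewritten so that the accessed region of %A is first copied into a
// buffer in the fast memory space, and the original access then reads from
// that buffer instead:
//
//   %buf = memref.alloc() : memref<256xf32, 2>
//   affine.for %i = 0 to 256 {
//     %w = affine.load %A[%i] : memref<256xf32>
//     affine.store %w, %buf[%i] : memref<256xf32, 2>
//   }
//   affine.for %i = 0 to 256 {
//     %v = affine.load %buf[%i] : memref<256xf32, 2>
//   }
//   memref.dealloc %buf : memref<256xf32, 2>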

#include "PassDetail.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/LoopUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "affine-data-copy-generate"

using namespace mlir;

namespace {

/// Replaces all loads and stores on memrefs living in 'slowMemorySpace' by
/// introducing copy operations to transfer data into 'fastMemorySpace' and
/// rewriting the original loads/stores to instead access the allocated fast
/// memory buffers. Additional options specify the identifier corresponding to
/// the fast memory space and the amount of fast memory space available. The
/// pass traverses through the nesting structure, recursing to inner levels if
/// necessary to determine at what depth copies need to be placed so that the
/// allocated buffers fit within the memory capacity provided.
// TODO: We currently can't generate copies correctly when stores
// are strided. Check for strided stores.
struct AffineDataCopyGeneration
    : public AffineDataCopyGenerationBase<AffineDataCopyGeneration> {
  AffineDataCopyGeneration() = default;
  explicit AffineDataCopyGeneration(unsigned slowMemorySpace,
                                    unsigned fastMemorySpace,
                                    unsigned tagMemorySpace,
                                    int minDmaTransferSize,
                                    uint64_t fastMemCapacityBytes) {
    this->slowMemorySpace = slowMemorySpace;
    this->fastMemorySpace = fastMemorySpace;
    this->tagMemorySpace = tagMemorySpace;
    this->minDmaTransferSize = minDmaTransferSize;
    this->fastMemoryCapacity = fastMemCapacityBytes / 1024;
  }

  void runOnFunction() override;
  LogicalResult runOnBlock(Block *block, DenseSet<Operation *> &copyNests);

  // Constant zero index to avoid too many duplicates.
  Value zeroIndex = nullptr;
};

} // end anonymous namespace

/// Generates copies for memrefs living in 'slowMemorySpace' into newly created
/// buffers in 'fastMemorySpace', and replaces memory operations to the former
/// by the latter. Only load ops are handled for now.
/// TODO: extend this to store ops.
std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
    unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
    int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
  return std::make_unique<AffineDataCopyGeneration>(
      slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
      fastMemCapacityBytes);
}
std::unique_ptr<OperationPass<FuncOp>>
mlir::createAffineDataCopyGenerationPass() {
  return std::make_unique<AffineDataCopyGeneration>();
}
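// As a usage sketch (hypothetical driver code; the numeric parameter values
// are placeholders chosen for illustration), the parameterized variant can be
// added to a pass pipeline with:
//
//   pm.addNestedPass<FuncOp>(mlir::createAffineDataCopyGenerationPass(
//       /*slowMemorySpace=*/0, /*fastMemorySpace=*/2, /*tagMemorySpace=*/3,
//       /*minDmaTransferSize=*/1024, /*fastMemCapacityBytes=*/32 * 1024));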

/// Generates copies for this block. The block is partitioned into separate
/// ranges: each range is either a sequence of one or more operations starting
/// and ending with an affine load or store op, or just an affine.for op (which
/// could have other affine.for ops nested within).
LogicalResult
AffineDataCopyGeneration::runOnBlock(Block *block,
                                     DenseSet<Operation *> &copyNests) {
  if (block->empty())
    return success();

  uint64_t fastMemCapacityBytes =
      fastMemoryCapacity != std::numeric_limits<uint64_t>::max()
          ? fastMemoryCapacity * 1024
          : fastMemoryCapacity;
  AffineCopyOptions copyOptions = {generateDma, slowMemorySpace,
                                   fastMemorySpace, tagMemorySpace,
                                   fastMemCapacityBytes};

  // Every affine.for op in the block starts and ends a block range for
  // copying; in addition, a contiguous sequence of operations starting with a
  // load/store op but not including any copy nests themselves is also
  // identified as a copy block range. Straightline code (a contiguous chunk of
  // operations excluding AffineForOp's) is always assumed to not exhaust
  // memory. As a result, this approach is conservative in some cases at the
  // moment; we do a check later and report an error with location info.
  // TODO: An 'affine.if' operation is currently treated like any other
  // straightline operation. 'affine.if's could have 'affine.for's nested in
  // them; treat them separately.

  // Get to the first load, store, or for op (that is not a copy nest itself).
  auto curBegin =
      std::find_if(block->begin(), block->end(), [&](Operation &op) {
        return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
               copyNests.count(&op) == 0;
      });

  // Create [begin, end) ranges.
  auto it = curBegin;
  while (it != block->end()) {
    AffineForOp forOp;
    // If we hit a non-copy 'for' loop, we split the range there.
    if ((forOp = dyn_cast<AffineForOp>(&*it)) && copyNests.count(forOp) == 0) {
      // Perform the copying up until this 'for' op first.
      affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/it, copyOptions,
                             /*filterMemRef=*/llvm::None, copyNests);

      // Returns true if the footprint is known to exceed capacity.
      auto exceedsCapacity = [&](AffineForOp forOp) {
        Optional<int64_t> footprint =
            getMemoryFootprintBytes(forOp,
                                    /*memorySpace=*/0);
        return (footprint.hasValue() &&
                static_cast<uint64_t>(footprint.getValue()) >
                    fastMemCapacityBytes);
      };

      // If the memory footprint of the 'affine.for' loop is higher than the
      // fast memory capacity (when provided), we recurse to copy at an inner
      // level until we find a depth at which the footprint fits in the fast
      // memory capacity. If the footprint can't be calculated, we assume for
      // now that it fits. Recurse inside if the footprint for 'forOp' exceeds
      // capacity, or when skipNonUnitStrideLoops is set and the step size is
      // not one.
      bool recurseInner = skipNonUnitStrideLoops ? forOp.getStep() != 1
                                                 : exceedsCapacity(forOp);
      if (recurseInner) {
        // We'll recurse and do the copies at an inner level for 'forOp'.
        // Recurse onto the body of this loop.
        (void)runOnBlock(forOp.getBody(), copyNests);
      } else {
        // We have enough capacity, i.e., copies will be computed for the
        // portion of the block until 'it', and for 'it', which is 'forOp'.
        // Note that for the latter, the copies are placed just before this
        // loop (for incoming copies) and right after (for outgoing ones).

        // Inner loop copies have their own scope; we thus don't update
        // consumed capacity. The footprint check above guarantees that this
        // inner loop's footprint fits.
        affineDataCopyGenerate(/*begin=*/it, /*end=*/std::next(it), copyOptions,
                               /*filterMemRef=*/llvm::None, copyNests);
      }
      // Get to the next load or store op after 'forOp'.
      curBegin = std::find_if(std::next(it), block->end(), [&](Operation &op) {
        return isa<AffineLoadOp, AffineStoreOp, AffineForOp>(op) &&
               copyNests.count(&op) == 0;
      });
      it = curBegin;
    } else {
      assert(copyNests.count(&*it) == 0 &&
             "all copy nests generated should have been skipped above");
      // We simply include this op in the current range and continue for more.
      ++it;
    }
  }

  // Generate the copy for the final block range.
  if (curBegin != block->end()) {
    // Can't be a terminator because it would have been skipped above.
    assert(!curBegin->hasTrait<OpTrait::IsTerminator>() &&
           "can't be a terminator");
    // Exclude the affine.yield - hence, the std::prev.
    affineDataCopyGenerate(/*begin=*/curBegin, /*end=*/std::prev(block->end()),
                           copyOptions, /*filterMemRef=*/llvm::None, copyNests);
  }

  return success();
}
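// To illustrate the range partitioning above (a hypothetical sketch, not pass
// output), a block of the form
//
//   %0 = affine.load ...   // begins range 1
//   %1 = addf %0, %0 ...
//   affine.store %1, ...   // ends range 1
//   affine.for %i ... {    // range 2: an 'affine.for' op by itself
//     ...
//   }
//   %2 = affine.load ...   // range 3: the trailing straightline chunk
//
// is split into three copy ranges, with copies generated independently for
// each: the leading load-to-store sequence, the 'affine.for' op (whose copies
// are placed just before and after the loop, unless we recurse into its
// body), and the trailing load.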

void AffineDataCopyGeneration::runOnFunction() {
  FuncOp f = getFunction();
  OpBuilder topBuilder(f.getBody());
  zeroIndex = topBuilder.create<ConstantIndexOp>(f.getLoc(), 0);

  // Nests that are copy-ins or copy-outs; the root AffineForOps of those
  // nests are stored herein.
  DenseSet<Operation *> copyNests;

  // Clear recorded copy nests.
  copyNests.clear();

  for (auto &block : f)
    (void)runOnBlock(&block, copyNests);

  // Promote any single iteration loops in the copy nests and collect
  // load/stores to simplify.
  SmallVector<Operation *, 4> copyOps;
  for (Operation *nest : copyNests)
    // With a post order walk, the erasure of loops does not affect
    // continuation of the walk or the collection of load/store ops.
    nest->walk([&](Operation *op) {
      if (auto forOp = dyn_cast<AffineForOp>(op))
        (void)promoteIfSingleIteration(forOp);
      else if (isa<AffineLoadOp, AffineStoreOp>(op))
        copyOps.push_back(op);
    });

  // Promoting single iteration loops could lead to simplification of
  // contained loads/stores, and the latter could anyway also be canonicalized.
  RewritePatternSet patterns(&getContext());
  AffineLoadOp::getCanonicalizationPatterns(patterns, &getContext());
  AffineStoreOp::getCanonicalizationPatterns(patterns, &getContext());
  FrozenRewritePatternSet frozenPatterns(std::move(patterns));
  (void)applyOpPatternsAndFold(copyOps, frozenPatterns, /*strict=*/true);
}
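// A note on the cleanup above (an illustrative sketch, not verbatim output):
// copy nests over unit-extent regions can contain single-iteration loops such
// as
//
//   affine.for %i = 0 to 1 {
//     %v = affine.load %buf[%i] : memref<1xf32, 2>
//   }
//
// which promoteIfSingleIteration replaces with the loop body, with uses of %i
// rewritten in terms of the lower bound, roughly:
//
//   %v = affine.load %buf[0] : memref<1xf32, 2>
//
// The load/store canonicalization patterns applied afterwards then fold such
// constant indices into the ops' access maps.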