//===- MemRefToLLVM.cpp - MemRef to LLVM dialect conversion ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "../PassDetail.h"
#include "mlir/Analysis/DataLayoutAnalysis.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"

using namespace mlir;

namespace {

struct AllocOpLowering : public AllocLikeOpLLVMLowering {
  AllocOpLowering(LLVMTypeConverter &converter)
      : AllocLikeOpLLVMLowering(memref::AllocOp::getOperationName(),
                                converter) {}

  std::tuple<Value, Value> allocateBuffer(ConversionPatternRewriter &rewriter,
                                          Location loc, Value sizeBytes,
                                          Operation *op) const override {
    // Heap allocations.
    memref::AllocOp allocOp = cast<memref::AllocOp>(op);
    MemRefType memRefType = allocOp.getType();

    Value alignment;
    if (auto alignmentAttr = allocOp.alignment()) {
      alignment = createIndexConstant(rewriter, loc, *alignmentAttr);
    } else if (!memRefType.getElementType().isSignlessIntOrIndexOrFloat()) {
      // In the case where no alignment is specified, we may want to override
      // `malloc`'s behavior. `malloc` typically aligns at the size of the
      // biggest scalar on a target HW. For non-scalars, use the natural
      // alignment of the LLVM type given by the LLVM DataLayout.
      alignment = getSizeInBytes(loc, memRefType.getElementType(), rewriter);
    }

    if (alignment) {
      // Adjust the allocation size to consider alignment.
      sizeBytes = rewriter.create<LLVM::AddOp>(loc, sizeBytes, alignment);
    }

    // Allocate the underlying buffer and store a pointer to it in the MemRef
    // descriptor.
    Type elementPtrType = this->getElementPtrType(memRefType);
    auto allocFuncOp = LLVM::lookupOrCreateMallocFn(
        allocOp->getParentOfType<ModuleOp>(), getIndexType());
    auto results = createLLVMCall(rewriter, loc, allocFuncOp, {sizeBytes},
                                  getVoidPtrType());
    Value allocatedPtr =
        rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, results[0]);

    Value alignedPtr = allocatedPtr;
    if (alignment) {
      // Compute the aligned pointer.
      Value allocatedInt =
          rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), allocatedPtr);
      Value alignmentInt =
          createAligned(rewriter, loc, allocatedInt, alignment);
      alignedPtr =
          rewriter.create<LLVM::IntToPtrOp>(loc, elementPtrType, alignmentInt);
    }

    return std::make_tuple(allocatedPtr, alignedPtr);
  }
};
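// Editor's illustration (not part of the upstream file): for a statically
// shaped allocation such as
//   %0 = memref.alloc() : memref<4xf32>
// the pattern above is expected to produce, roughly (assuming a 64-bit index
// type and eliding the llvm.insertvalue chain that populates the descriptor):
//   %sz  = llvm.mlir.constant(16 : index) : i64
//   %raw = llvm.call @malloc(%sz) : (i64) -> !llvm.ptr<i8>
//   %ptr = llvm.bitcast %raw : !llvm.ptr<i8> to !llvm.ptr<f32>
// When an alignment attribute is present, the size is padded by the alignment
// and the aligned pointer is recomputed with the ptrtoint/inttoptr arithmetic
// shown in `allocateBuffer` above.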
struct AlignedAllocOpLowering : public AllocLikeOpLLVMLowering {
  AlignedAllocOpLowering(LLVMTypeConverter &converter)
      : AllocLikeOpLLVMLowering(memref::AllocOp::getOperationName(),
                                converter) {}

  /// Returns the memref's element size in bytes using the data layout active
  /// at `op`.
  // TODO: there are other places where this is used. Expose publicly?
  unsigned getMemRefEltSizeInBytes(MemRefType memRefType, Operation *op) const {
    const DataLayout *layout = &defaultLayout;
    if (const DataLayoutAnalysis *analysis =
            getTypeConverter()->getDataLayoutAnalysis()) {
      layout = &analysis->getAbove(op);
    }
    Type elementType = memRefType.getElementType();
    if (auto memRefElementType = elementType.dyn_cast<MemRefType>())
      return getTypeConverter()->getMemRefDescriptorSize(memRefElementType,
                                                         *layout);
    if (auto memRefElementType = elementType.dyn_cast<UnrankedMemRefType>())
      return getTypeConverter()->getUnrankedMemRefDescriptorSize(
          memRefElementType, *layout);
    return layout->getTypeSize(elementType);
  }

  /// Returns true if the memref size in bytes is known to be a multiple of
  /// `factor`, assuming the data layout active at `op`.
  bool isMemRefSizeMultipleOf(MemRefType type, uint64_t factor,
                              Operation *op) const {
    uint64_t sizeDivisor = getMemRefEltSizeInBytes(type, op);
    for (unsigned i = 0, e = type.getRank(); i < e; i++) {
      if (type.isDynamic(type.getDimSize(i)))
        continue;
      sizeDivisor = sizeDivisor * type.getDimSize(i);
    }
    return sizeDivisor % factor == 0;
  }

  /// Returns the alignment to be used for the allocation call itself.
  /// aligned_alloc requires the alignment to be a power of two, and the
  /// allocation size to be a multiple of the alignment.
  int64_t getAllocationAlignment(memref::AllocOp allocOp) const {
    if (Optional<uint64_t> alignment = allocOp.alignment())
      return *alignment;

    // Whenever we don't have alignment set, we will use an alignment
    // consistent with the element type; since the alignment has to be a power
    // of two, we bump the element size to the next power of two if it isn't
    // one already.
    auto eltSizeBytes = getMemRefEltSizeInBytes(allocOp.getType(), allocOp);
    return std::max(kMinAlignedAllocAlignment,
                    llvm::PowerOf2Ceil(eltSizeBytes));
  }

  std::tuple<Value, Value> allocateBuffer(ConversionPatternRewriter &rewriter,
                                          Location loc, Value sizeBytes,
                                          Operation *op) const override {
    // Heap allocations.
    memref::AllocOp allocOp = cast<memref::AllocOp>(op);
    MemRefType memRefType = allocOp.getType();
    int64_t alignment = getAllocationAlignment(allocOp);
    Value allocAlignment = createIndexConstant(rewriter, loc, alignment);

    // aligned_alloc requires the size to be a multiple of the alignment; we
    // will pad the size to the next multiple if necessary.
    if (!isMemRefSizeMultipleOf(memRefType, alignment, op))
      sizeBytes = createAligned(rewriter, loc, sizeBytes, allocAlignment);

    Type elementPtrType = this->getElementPtrType(memRefType);
    auto allocFuncOp = LLVM::lookupOrCreateAlignedAllocFn(
        allocOp->getParentOfType<ModuleOp>(), getIndexType());
    auto results =
        createLLVMCall(rewriter, loc, allocFuncOp, {allocAlignment, sizeBytes},
                       getVoidPtrType());
    Value allocatedPtr =
        rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, results[0]);

    return std::make_tuple(allocatedPtr, allocatedPtr);
  }

  /// The minimum alignment to use with aligned_alloc (has to be a power of 2).
  static constexpr uint64_t kMinAlignedAllocAlignment = 16UL;

  /// Default layout to use in absence of the corresponding analysis.
  DataLayout defaultLayout;
};
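// Editor's illustration (not part of the upstream file): when the lowering is
// configured to use aligned_alloc, the same `memref.alloc() : memref<4xf32>`
// is expected to become roughly:
//   %align = llvm.mlir.constant(16 : index) : i64
//   %sz    = llvm.mlir.constant(16 : index) : i64
//   %raw   = llvm.call @aligned_alloc(%align, %sz)
//            : (i64, i64) -> !llvm.ptr<i8>
//   %ptr   = llvm.bitcast %raw : !llvm.ptr<i8> to !llvm.ptr<f32>
// Note that `createAligned` only pads the size when the memref size is not
// statically known to be a multiple of the alignment, keeping the common case
// free of padding arithmetic.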
// Out-of-line definition, required until C++17.
constexpr uint64_t AlignedAllocOpLowering::kMinAlignedAllocAlignment;

struct AllocaOpLowering : public AllocLikeOpLLVMLowering {
  AllocaOpLowering(LLVMTypeConverter &converter)
      : AllocLikeOpLLVMLowering(memref::AllocaOp::getOperationName(),
                                converter) {}

  /// Allocates the underlying buffer using the right call. `allocatedBytePtr`
  /// is set to null for stack allocations. `accessAlignment` is set if
  /// alignment is needed post allocation (e.g., in conjunction with malloc).
  std::tuple<Value, Value> allocateBuffer(ConversionPatternRewriter &rewriter,
                                          Location loc, Value sizeBytes,
                                          Operation *op) const override {

    // With alloca, one gets a pointer to the element type right away.
    // For stack allocations.
    auto allocaOp = cast<memref::AllocaOp>(op);
    auto elementPtrType = this->getElementPtrType(allocaOp.getType());

    auto allocatedElementPtr = rewriter.create<LLVM::AllocaOp>(
        loc, elementPtrType, sizeBytes,
        allocaOp.alignment() ? *allocaOp.alignment() : 0);

    return std::make_tuple(allocatedElementPtr, allocatedElementPtr);
  }
};

struct AllocaScopeOpLowering
    : public ConvertOpToLLVMPattern<memref::AllocaScopeOp> {
  using ConvertOpToLLVMPattern<memref::AllocaScopeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::AllocaScopeOp allocaScopeOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    OpBuilder::InsertionGuard guard(rewriter);
    Location loc = allocaScopeOp.getLoc();

    // Split the current block before the AllocaScopeOp to create the inlining
    // point.
    auto *currentBlock = rewriter.getInsertionBlock();
    auto *remainingOpsBlock =
        rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
    Block *continueBlock;
    if (allocaScopeOp.getNumResults() == 0) {
      continueBlock = remainingOpsBlock;
    } else {
      continueBlock = rewriter.createBlock(remainingOpsBlock,
                                           allocaScopeOp.getResultTypes());
      rewriter.create<LLVM::BrOp>(loc, ValueRange(), remainingOpsBlock);
    }

    // Inline the body region.
    Block *beforeBody = &allocaScopeOp.bodyRegion().front();
    Block *afterBody = &allocaScopeOp.bodyRegion().back();
    rewriter.inlineRegionBefore(allocaScopeOp.bodyRegion(), continueBlock);

    // Save the stack and then branch into the body of the region.
    rewriter.setInsertionPointToEnd(currentBlock);
    auto stackSaveOp =
        rewriter.create<LLVM::StackSaveOp>(loc, getVoidPtrType());
    rewriter.create<LLVM::BrOp>(loc, ValueRange(), beforeBody);

    // Replace the alloca_scope return with a branch that jumps out of the
    // body. The stack is restored before leaving the body region.
    rewriter.setInsertionPointToEnd(afterBody);
    auto returnOp =
        cast<memref::AllocaScopeReturnOp>(afterBody->getTerminator());
    auto branchOp = rewriter.replaceOpWithNewOp<LLVM::BrOp>(
        returnOp, returnOp.results(), continueBlock);

    // Insert a stack restore before jumping out of the body of the region.
    rewriter.setInsertionPoint(branchOp);
    rewriter.create<LLVM::StackRestoreOp>(loc, stackSaveOp);

    // Replace the op with the values returned from the body region.
    rewriter.replaceOp(allocaScopeOp, continueBlock->getArguments());

    return success();
  }
};
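// Editor's illustration (not part of the upstream file): the CFG produced for
//   memref.alloca_scope { ... }
// looks roughly like:
//   ^init:                    // ops preceding the scope
//     %sp = llvm.intr.stacksave : !llvm.ptr<i8>
//     llvm.br ^body
//   ^body:                    // inlined body region
//     ...
//     llvm.intr.stackrestore %sp
//     llvm.br ^continue(...)  // carries the alloca_scope.return operands
//   ^continue(...):           // remaining ops
// so that any llvm.alloca issued inside the body is released on exit.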
struct AssumeAlignmentOpLowering
    : public ConvertOpToLLVMPattern<memref::AssumeAlignmentOp> {
  using ConvertOpToLLVMPattern<
      memref::AssumeAlignmentOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::AssumeAlignmentOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Value memref = adaptor.memref();
    unsigned alignment = op.alignment();
    auto loc = op.getLoc();

    MemRefDescriptor memRefDescriptor(memref);
    Value ptr = memRefDescriptor.alignedPtr(rewriter, memref.getLoc());

    // Emit llvm.assume(memref.alignedPtr & (alignment - 1) == 0). Notice that
    // the asserted memref.alignedPtr isn't used anywhere else, as the real
    // users like load/store/views always re-extract memref.alignedPtr as they
    // get lowered.
    //
    // This relies on LLVM's CSE optimization (potentially after SROA), since
    // after CSE all memref.alignedPtr instances get de-duplicated into the
    // same pointer SSA value.
    auto intPtrType =
        getIntPtrType(memRefDescriptor.getElementPtrType().getAddressSpace());
    Value zero = createIndexAttrConstant(rewriter, loc, intPtrType, 0);
    Value mask =
        createIndexAttrConstant(rewriter, loc, intPtrType, alignment - 1);
    Value ptrValue = rewriter.create<LLVM::PtrToIntOp>(loc, intPtrType, ptr);
    rewriter.create<LLVM::AssumeOp>(
        loc, rewriter.create<LLVM::ICmpOp>(
                 loc, LLVM::ICmpPredicate::eq,
                 rewriter.create<LLVM::AndOp>(loc, ptrValue, mask), zero));

    rewriter.eraseOp(op);
    return success();
  }
};

// A `dealloc` is converted into a call to `free` on the underlying data
// buffer. The memref descriptor being an SSA value, there is no need to clean
// it up in any way.
struct DeallocOpLowering : public ConvertOpToLLVMPattern<memref::DeallocOp> {
  using ConvertOpToLLVMPattern<memref::DeallocOp>::ConvertOpToLLVMPattern;

  explicit DeallocOpLowering(LLVMTypeConverter &converter)
      : ConvertOpToLLVMPattern<memref::DeallocOp>(converter) {}

  LogicalResult
  matchAndRewrite(memref::DeallocOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Insert the `free` declaration if it is not already present.
    auto freeFunc = LLVM::lookupOrCreateFreeFn(op->getParentOfType<ModuleOp>());
    MemRefDescriptor memref(adaptor.memref());
    Value casted = rewriter.create<LLVM::BitcastOp>(
        op.getLoc(), getVoidPtrType(),
        memref.allocatedPtr(rewriter, op.getLoc()));
    rewriter.replaceOpWithNewOp<LLVM::CallOp>(
        op, TypeRange(), SymbolRefAttr::get(freeFunc), casted);
    return success();
  }
};
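// Editor's illustration (not part of the upstream file): for a 1-D memref,
// `memref.dealloc %m : memref<4xf32>` is expected to become roughly:
//   %alloc = llvm.extractvalue %desc[0]
//            : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>,
//                            array<1 x i64>)>
//   %void  = llvm.bitcast %alloc : !llvm.ptr<f32> to !llvm.ptr<i8>
//   llvm.call @free(%void) : (!llvm.ptr<i8>) -> ()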
// A `dim` is converted to a constant for static sizes and to an access to the
// size stored in the memref descriptor for dynamic sizes.
struct DimOpLowering : public ConvertOpToLLVMPattern<memref::DimOp> {
  using ConvertOpToLLVMPattern<memref::DimOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::DimOp dimOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type operandType = dimOp.source().getType();
    if (operandType.isa<UnrankedMemRefType>()) {
      rewriter.replaceOp(
          dimOp, {extractSizeOfUnrankedMemRef(
                     operandType, dimOp, adaptor.getOperands(), rewriter)});

      return success();
    }
    if (operandType.isa<MemRefType>()) {
      rewriter.replaceOp(
          dimOp, {extractSizeOfRankedMemRef(operandType, dimOp,
                                            adaptor.getOperands(), rewriter)});
      return success();
    }
    llvm_unreachable("expected MemRefType or UnrankedMemRefType");
  }

private:
  Value extractSizeOfUnrankedMemRef(Type operandType, memref::DimOp dimOp,
                                    OpAdaptor adaptor,
                                    ConversionPatternRewriter &rewriter) const {
    Location loc = dimOp.getLoc();

    auto unrankedMemRefType = operandType.cast<UnrankedMemRefType>();
    auto scalarMemRefType =
        MemRefType::get({}, unrankedMemRefType.getElementType());
    unsigned addressSpace = unrankedMemRefType.getMemorySpaceAsInt();

    // Extract pointer to the underlying ranked descriptor and bitcast it to a
    // memref<element_type> descriptor pointer to minimize the number of GEP
    // operations.
    UnrankedMemRefDescriptor unrankedDesc(adaptor.source());
    Value underlyingRankedDesc = unrankedDesc.memRefDescPtr(rewriter, loc);
    Value scalarMemRefDescPtr = rewriter.create<LLVM::BitcastOp>(
        loc,
        LLVM::LLVMPointerType::get(typeConverter->convertType(scalarMemRefType),
                                   addressSpace),
        underlyingRankedDesc);

    // Get pointer to offset field of memref<element_type> descriptor.
    Type indexPtrTy = LLVM::LLVMPointerType::get(
        getTypeConverter()->getIndexType(), addressSpace);
    Value two = rewriter.create<LLVM::ConstantOp>(
        loc, typeConverter->convertType(rewriter.getI32Type()),
        rewriter.getI32IntegerAttr(2));
    Value offsetPtr = rewriter.create<LLVM::GEPOp>(
        loc, indexPtrTy, scalarMemRefDescPtr,
        ValueRange({createIndexConstant(rewriter, loc, 0), two}));

    // The size value that we have to extract can be obtained using GEPOp with
    // `dimOp.index() + 1` as the index argument.
    Value idxPlusOne = rewriter.create<LLVM::AddOp>(
        loc, createIndexConstant(rewriter, loc, 1), adaptor.index());
    Value sizePtr = rewriter.create<LLVM::GEPOp>(loc, indexPtrTy, offsetPtr,
                                                 ValueRange({idxPlusOne}));
    return rewriter.create<LLVM::LoadOp>(loc, sizePtr);
  }

  Optional<int64_t> getConstantDimIndex(memref::DimOp dimOp) const {
    if (Optional<int64_t> idx = dimOp.getConstantIndex())
      return idx;

    if (auto constantOp = dimOp.index().getDefiningOp<LLVM::ConstantOp>())
      return constantOp.value().cast<IntegerAttr>().getValue().getSExtValue();

    return llvm::None;
  }

  Value extractSizeOfRankedMemRef(Type operandType, memref::DimOp dimOp,
                                  OpAdaptor adaptor,
                                  ConversionPatternRewriter &rewriter) const {
    Location loc = dimOp.getLoc();

    // Take advantage of the constant index when available.
    MemRefType memRefType = operandType.cast<MemRefType>();
    if (Optional<int64_t> index = getConstantDimIndex(dimOp)) {
      int64_t i = index.getValue();
      if (memRefType.isDynamicDim(i)) {
        // Extract the dynamic size from the memref descriptor.
        MemRefDescriptor descriptor(adaptor.source());
        return descriptor.size(rewriter, loc, i);
      }
      // Use a constant for the static size.
      int64_t dimSize = memRefType.getDimSize(i);
      return createIndexConstant(rewriter, loc, dimSize);
    }
    Value index = adaptor.index();
    int64_t rank = memRefType.getRank();
    MemRefDescriptor memrefDescriptor(adaptor.source());
    return memrefDescriptor.size(rewriter, loc, index, rank);
  }
};

/// Returns the LLVM type of the global variable given the memref type `type`.
static Type convertGlobalMemrefTypeToLLVM(MemRefType type,
                                          LLVMTypeConverter &typeConverter) {
  // The LLVM type for a global memref will be a multi-dimensional array. For
  // declarations or uninitialized global memrefs, we could potentially flatten
  // this to a 1D array. However, for memref.global's with an initial value,
  // we do not intend to flatten the ElementsAttribute when going from std ->
  // LLVM dialect, so the LLVM type needs to be a multi-dimensional array.
  Type elementType = typeConverter.convertType(type.getElementType());
  Type arrayTy = elementType;
  // The shape has the outermost dim at index 0, so we need to walk it
  // backwards.
  for (int64_t dim : llvm::reverse(type.getShape()))
    arrayTy = LLVM::LLVMArrayType::get(arrayTy, dim);
  return arrayTy;
}

/// GlobalMemrefOp is lowered to an LLVM Global Variable.
struct GlobalMemrefOpLowering
    : public ConvertOpToLLVMPattern<memref::GlobalOp> {
  using ConvertOpToLLVMPattern<memref::GlobalOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::GlobalOp global, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    MemRefType type = global.type();
    if (!isConvertibleAndHasIdentityMaps(type))
      return failure();

    Type arrayTy = convertGlobalMemrefTypeToLLVM(type, *getTypeConverter());

    LLVM::Linkage linkage =
        global.isPublic() ? LLVM::Linkage::External : LLVM::Linkage::Private;

    Attribute initialValue = nullptr;
    if (!global.isExternal() && !global.isUninitialized()) {
      auto elementsAttr = global.initial_value()->cast<ElementsAttr>();
      initialValue = elementsAttr;

      // For scalar memrefs, the global variable created is of the element
      // type, so unpack the elements attribute to extract the value.
      if (type.getRank() == 0)
        initialValue = elementsAttr.getValue({});
    }

    uint64_t alignment = global.alignment().getValueOr(0);

    auto newGlobal = rewriter.replaceOpWithNewOp<LLVM::GlobalOp>(
        global, arrayTy, global.constant(), linkage, global.sym_name(),
        initialValue, alignment, type.getMemorySpaceAsInt());
    if (!global.isExternal() && global.isUninitialized()) {
      Block *blk = new Block();
      newGlobal.getInitializerRegion().push_back(blk);
      rewriter.setInsertionPointToStart(blk);
      Value undef[] = {
          rewriter.create<LLVM::UndefOp>(global.getLoc(), arrayTy)};
      rewriter.create<LLVM::ReturnOp>(global.getLoc(), undef);
    }
    return success();
  }
};
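// Editor's illustration (not part of the upstream file): a global such as
//   memref.global "private" constant @gv : memref<2x3xf32> = dense<...>
// is expected to become roughly:
//   llvm.mlir.global private constant @gv(dense<...> : tensor<2x3xf32>)
//       : !llvm.array<2 x array<3 x f32>>
// i.e. the shape is preserved as nested LLVM arrays rather than flattened, so
// the initializer attribute can be reused unchanged.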
/// GetGlobalMemrefOp is lowered into a MemRef descriptor with the pointer to
/// the first element stashed into the descriptor. This is built on
/// `AllocLikeOpLLVMLowering` to reuse the MemRef descriptor construction.
struct GetGlobalMemrefOpLowering : public AllocLikeOpLLVMLowering {
  GetGlobalMemrefOpLowering(LLVMTypeConverter &converter)
      : AllocLikeOpLLVMLowering(memref::GetGlobalOp::getOperationName(),
                                converter) {}

  /// Buffer "allocation" for memref.get_global op is getting the address of
  /// the referenced global variable.
  std::tuple<Value, Value> allocateBuffer(ConversionPatternRewriter &rewriter,
                                          Location loc, Value sizeBytes,
                                          Operation *op) const override {
    auto getGlobalOp = cast<memref::GetGlobalOp>(op);
    MemRefType type = getGlobalOp.result().getType().cast<MemRefType>();
    unsigned memSpace = type.getMemorySpaceAsInt();

    Type arrayTy = convertGlobalMemrefTypeToLLVM(type, *getTypeConverter());
    auto addressOf = rewriter.create<LLVM::AddressOfOp>(
        loc, LLVM::LLVMPointerType::get(arrayTy, memSpace), getGlobalOp.name());

    // Get the address of the first element in the array by creating a GEP with
    // the address of the GV as the base, and (rank + 1) number of 0 indices.
    Type elementType = typeConverter->convertType(type.getElementType());
    Type elementPtrType = LLVM::LLVMPointerType::get(elementType, memSpace);

    SmallVector<Value, 4> operands = {addressOf};
    operands.insert(operands.end(), type.getRank() + 1,
                    createIndexConstant(rewriter, loc, 0));
    auto gep = rewriter.create<LLVM::GEPOp>(loc, elementPtrType, operands);

    // We do not expect the memref obtained using `memref.get_global` to be
    // ever deallocated. Set the allocated pointer to a known bad value to
    // help debug if that ever happens.
    auto intPtrType = getIntPtrType(memSpace);
    Value deadBeefConst =
        createIndexAttrConstant(rewriter, op->getLoc(), intPtrType, 0xdeadbeef);
    auto deadBeefPtr =
        rewriter.create<LLVM::IntToPtrOp>(loc, elementPtrType, deadBeefConst);

    // Both allocated and aligned pointers are the same. We could potentially
    // stash a nullptr for the allocated pointer since we do not expect any
    // dealloc.
    return std::make_tuple(deadBeefPtr, gep);
  }
};

// Common base for load and store operations on MemRefs. Restricts the match
// to supported MemRef types. Provides functionality to emit code accessing a
// specific element of the underlying data buffer.
template <typename Derived>
struct LoadStoreOpLowering : public ConvertOpToLLVMPattern<Derived> {
  using ConvertOpToLLVMPattern<Derived>::ConvertOpToLLVMPattern;
  using ConvertOpToLLVMPattern<Derived>::isConvertibleAndHasIdentityMaps;
  using Base = LoadStoreOpLowering<Derived>;

  LogicalResult match(Derived op) const override {
    MemRefType type = op.getMemRefType();
    return isConvertibleAndHasIdentityMaps(type) ? success() : failure();
  }
};

// The load operation is lowered to obtaining a pointer to the indexed element
// and loading it.
struct LoadOpLowering : public LoadStoreOpLowering<memref::LoadOp> {
  using Base::Base;

  LogicalResult
  matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto type = loadOp.getMemRefType();

    Value dataPtr = getStridedElementPtr(
        loadOp.getLoc(), type, adaptor.memref(), adaptor.indices(), rewriter);
    rewriter.replaceOpWithNewOp<LLVM::LoadOp>(loadOp, dataPtr);
    return success();
  }
};
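// Editor's illustration (not part of the upstream file): for an identity
// layout, `memref.load %m[%i, %j] : memref<4x8xf32>` is expected to become
// roughly:
//   %base = llvm.extractvalue %desc[1] : !llvm.struct<...>   // aligned ptr
//   %c8   = llvm.mlir.constant(8 : index) : i64
//   %0    = llvm.mul %i, %c8 : i64
//   %1    = llvm.add %0, %j : i64
//   %addr = llvm.getelementptr %base[%1]
//           : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
//   %v    = llvm.load %addr : !llvm.ptr<f32>
// with the address arithmetic produced by `getStridedElementPtr`.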
// The store operation is lowered to obtaining a pointer to the indexed
// element, and storing the given value to it.
struct StoreOpLowering : public LoadStoreOpLowering<memref::StoreOp> {
  using Base::Base;

  LogicalResult
  matchAndRewrite(memref::StoreOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto type = op.getMemRefType();

    Value dataPtr = getStridedElementPtr(op.getLoc(), type, adaptor.memref(),
                                         adaptor.indices(), rewriter);
    rewriter.replaceOpWithNewOp<LLVM::StoreOp>(op, adaptor.value(), dataPtr);
    return success();
  }
};

// The prefetch operation is lowered in a way similar to the load operation
// except that the llvm.prefetch operation is used for replacement.
struct PrefetchOpLowering : public LoadStoreOpLowering<memref::PrefetchOp> {
  using Base::Base;

  LogicalResult
  matchAndRewrite(memref::PrefetchOp prefetchOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto type = prefetchOp.getMemRefType();
    auto loc = prefetchOp.getLoc();

    Value dataPtr = getStridedElementPtr(loc, type, adaptor.memref(),
                                         adaptor.indices(), rewriter);

    // Replace with llvm.prefetch.
    auto llvmI32Type = typeConverter->convertType(rewriter.getIntegerType(32));
    auto isWrite = rewriter.create<LLVM::ConstantOp>(
        loc, llvmI32Type, rewriter.getI32IntegerAttr(prefetchOp.isWrite()));
    auto localityHint = rewriter.create<LLVM::ConstantOp>(
        loc, llvmI32Type,
        rewriter.getI32IntegerAttr(prefetchOp.localityHint()));
    auto isData = rewriter.create<LLVM::ConstantOp>(
        loc, llvmI32Type, rewriter.getI32IntegerAttr(prefetchOp.isDataCache()));

    rewriter.replaceOpWithNewOp<LLVM::Prefetch>(prefetchOp, dataPtr, isWrite,
                                                localityHint, isData);
    return success();
  }
};

struct MemRefCastOpLowering : public ConvertOpToLLVMPattern<memref::CastOp> {
  using ConvertOpToLLVMPattern<memref::CastOp>::ConvertOpToLLVMPattern;

  LogicalResult match(memref::CastOp memRefCastOp) const override {
    Type srcType = memRefCastOp.getOperand().getType();
    Type dstType = memRefCastOp.getType();

    // memref::CastOp reduces to a bitcast in the ranked MemRef case and can
    // be used for type erasure. For now it must preserve the underlying
    // element type and requires the source and result types to have the same
    // rank. Therefore, perform a sanity check that the underlying structs are
    // the same. Once op semantics are relaxed we can revisit.
    if (srcType.isa<MemRefType>() && dstType.isa<MemRefType>())
      return success(typeConverter->convertType(srcType) ==
                     typeConverter->convertType(dstType));

    // At least one of the operands is an unranked type.
    assert(srcType.isa<UnrankedMemRefType>() ||
           dstType.isa<UnrankedMemRefType>());

    // Unranked-to-unranked casts are disallowed.
    return !(srcType.isa<UnrankedMemRefType>() &&
             dstType.isa<UnrankedMemRefType>())
               ? success()
               : failure();
  }

  void rewrite(memref::CastOp memRefCastOp, OpAdaptor adaptor,
               ConversionPatternRewriter &rewriter) const override {
    auto srcType = memRefCastOp.getOperand().getType();
    auto dstType = memRefCastOp.getType();
    auto targetStructType = typeConverter->convertType(memRefCastOp.getType());
    auto loc = memRefCastOp.getLoc();

    // For the ranked/ranked case, just keep the original descriptor.
    if (srcType.isa<MemRefType>() && dstType.isa<MemRefType>())
      return rewriter.replaceOp(memRefCastOp, {adaptor.source()});

    if (srcType.isa<MemRefType>() && dstType.isa<UnrankedMemRefType>()) {
      // Casting a ranked to an unranked memref type:
      //   - set the rank in the destination from the memref type;
      //   - allocate space on the stack and copy the src memref descriptor;
      //   - set the ptr in the destination to the stack space.
      auto srcMemRefType = srcType.cast<MemRefType>();
      int64_t rank = srcMemRefType.getRank();
      // ptr = AllocaOp sizeof(MemRefDescriptor)
      auto ptr = getTypeConverter()->promoteOneMemRefDescriptor(
          loc, adaptor.source(), rewriter);
      // voidptr = BitCastOp srcType* to void*
      auto voidPtr =
          rewriter.create<LLVM::BitcastOp>(loc, getVoidPtrType(), ptr)
              .getResult();
      // rank = ConstantOp srcRank
      auto rankVal = rewriter.create<LLVM::ConstantOp>(
          loc, typeConverter->convertType(rewriter.getIntegerType(64)),
          rewriter.getI64IntegerAttr(rank));
      // undef = UndefOp
      UnrankedMemRefDescriptor memRefDesc =
          UnrankedMemRefDescriptor::undef(rewriter, loc, targetStructType);
      // d1 = InsertValueOp undef, rank, 0
      memRefDesc.setRank(rewriter, loc, rankVal);
      // d2 = InsertValueOp d1, voidptr, 1
      memRefDesc.setMemRefDescPtr(rewriter, loc, voidPtr);
      rewriter.replaceOp(memRefCastOp, (Value)memRefDesc);

    } else if (srcType.isa<UnrankedMemRefType>() && dstType.isa<MemRefType>()) {
      // Casting from an unranked type to a ranked one. The operation is
      // assumed to be doing a correct cast; if the destination type mismatches
      // the unranked type, the behavior is undefined.
      UnrankedMemRefDescriptor memRefDesc(adaptor.source());
      // ptr = ExtractValueOp src, 1
      auto ptr = memRefDesc.memRefDescPtr(rewriter, loc);
      // castPtr = BitCastOp i8* to structTy*
      auto castPtr =
          rewriter
              .create<LLVM::BitcastOp>(
                  loc, LLVM::LLVMPointerType::get(targetStructType), ptr)
              .getResult();
      // struct = LoadOp castPtr
      auto loadOp = rewriter.create<LLVM::LoadOp>(loc, castPtr);
      rewriter.replaceOp(memRefCastOp, loadOp.getResult());
    } else {
      llvm_unreachable("Unsupported unranked memref to unranked memref cast");
    }
  }
};

struct MemRefCopyOpLowering : public ConvertOpToLLVMPattern<memref::CopyOp> {
  using ConvertOpToLLVMPattern<memref::CopyOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::CopyOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    auto srcType = op.source().getType().cast<BaseMemRefType>();
    auto targetType = op.target().getType().cast<BaseMemRefType>();

    // First make sure we have an unranked memref descriptor representation.
    auto makeUnranked = [&, this](Value ranked, BaseMemRefType type) {
      auto rank = rewriter.create<LLVM::ConstantOp>(
          loc, getIndexType(), rewriter.getIndexAttr(type.getRank()));
      auto *typeConverter = getTypeConverter();
      auto ptr =
          typeConverter->promoteOneMemRefDescriptor(loc, ranked, rewriter);
      auto voidPtr =
          rewriter.create<LLVM::BitcastOp>(loc, getVoidPtrType(), ptr)
              .getResult();
      auto unrankedType =
          UnrankedMemRefType::get(type.getElementType(), type.getMemorySpace());
      return UnrankedMemRefDescriptor::pack(rewriter, loc, *typeConverter,
                                            unrankedType,
                                            ValueRange{rank, voidPtr});
    };
    Value unrankedSource = srcType.hasRank()
                               ? makeUnranked(adaptor.source(), srcType)
                               : adaptor.source();
    Value unrankedTarget = targetType.hasRank()
                               ? makeUnranked(adaptor.target(), targetType)
                               : adaptor.target();

    // Now promote the unranked descriptors to the stack.
    auto one = rewriter.create<LLVM::ConstantOp>(loc, getIndexType(),
                                                 rewriter.getIndexAttr(1));
    auto promote = [&](Value desc) {
      auto ptrType = LLVM::LLVMPointerType::get(desc.getType());
      auto allocated =
          rewriter.create<LLVM::AllocaOp>(loc, ptrType, ValueRange{one});
      rewriter.create<LLVM::StoreOp>(loc, desc, allocated);
      return allocated;
    };

    auto sourcePtr = promote(unrankedSource);
    auto targetPtr = promote(unrankedTarget);

    auto elemSize = rewriter.create<LLVM::ConstantOp>(
        loc, getIndexType(),
        rewriter.getIndexAttr(srcType.getElementTypeBitWidth() / 8));
    auto copyFn = LLVM::lookupOrCreateMemRefCopyFn(
        op->getParentOfType<ModuleOp>(), getIndexType(), sourcePtr.getType());
    rewriter.create<LLVM::CallOp>(loc, copyFn,
                                  ValueRange{elemSize, sourcePtr, targetPtr});
    rewriter.eraseOp(op);

    return success();
  }
};
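// Editor's illustration (not part of the upstream file): both copy operands
// are wrapped in unranked descriptors and spilled to the stack, so
// `memref.copy %src, %dst : memref<?xf32> to memref<?xf32>` ends up roughly as
//   %esz = llvm.mlir.constant(4 : index) : i64  // element size in bytes
//   llvm.call @memrefCopy(%esz, %srcDescPtr, %dstDescPtr) : (...) -> ()
// where @memrefCopy is the runtime helper declared via
// `lookupOrCreateMemRefCopyFn`.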
/// Extracts the allocated and aligned pointers and the offset from a ranked or
/// unranked memref type. In the unranked case, the fields are extracted from
/// the underlying ranked descriptor.
static void extractPointersAndOffset(Location loc,
                                     ConversionPatternRewriter &rewriter,
                                     LLVMTypeConverter &typeConverter,
                                     Value originalOperand,
                                     Value convertedOperand,
                                     Value *allocatedPtr, Value *alignedPtr,
                                     Value *offset = nullptr) {
  Type operandType = originalOperand.getType();
  if (operandType.isa<MemRefType>()) {
    MemRefDescriptor desc(convertedOperand);
    *allocatedPtr = desc.allocatedPtr(rewriter, loc);
    *alignedPtr = desc.alignedPtr(rewriter, loc);
    if (offset != nullptr)
      *offset = desc.offset(rewriter, loc);
    return;
  }

  unsigned memorySpace =
      operandType.cast<UnrankedMemRefType>().getMemorySpaceAsInt();
  Type elementType = operandType.cast<UnrankedMemRefType>().getElementType();
  Type llvmElementType = typeConverter.convertType(elementType);
  Type elementPtrPtrType = LLVM::LLVMPointerType::get(
      LLVM::LLVMPointerType::get(llvmElementType, memorySpace));

  // Extract pointer to the underlying ranked memref descriptor and cast it to
  // ElemType**.
  UnrankedMemRefDescriptor unrankedDesc(convertedOperand);
  Value underlyingDescPtr = unrankedDesc.memRefDescPtr(rewriter, loc);

  *allocatedPtr = UnrankedMemRefDescriptor::allocatedPtr(
      rewriter, loc, underlyingDescPtr, elementPtrPtrType);
  *alignedPtr = UnrankedMemRefDescriptor::alignedPtr(
      rewriter, loc, typeConverter, underlyingDescPtr, elementPtrPtrType);
  if (offset != nullptr) {
    *offset = UnrankedMemRefDescriptor::offset(
        rewriter, loc, typeConverter, underlyingDescPtr, elementPtrPtrType);
  }
}

struct MemRefReinterpretCastOpLowering
    : public ConvertOpToLLVMPattern<memref::ReinterpretCastOp> {
  using ConvertOpToLLVMPattern<
      memref::ReinterpretCastOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::ReinterpretCastOp castOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type srcType = castOp.source().getType();

    Value descriptor;
    if (failed(convertSourceMemRefToDescriptor(rewriter, srcType, castOp,
                                               adaptor, &descriptor)))
      return failure();
    rewriter.replaceOp(castOp, {descriptor});
    return success();
  }

private:
  LogicalResult convertSourceMemRefToDescriptor(
      ConversionPatternRewriter &rewriter, Type srcType,
      memref::ReinterpretCastOp castOp,
      memref::ReinterpretCastOp::Adaptor adaptor, Value *descriptor) const {
    MemRefType targetMemRefType =
        castOp.getResult().getType().cast<MemRefType>();
    auto llvmTargetDescriptorTy = typeConverter->convertType(targetMemRefType)
                                      .dyn_cast_or_null<LLVM::LLVMStructType>();
    if (!llvmTargetDescriptorTy)
      return failure();

    // Create descriptor.
    Location loc = castOp.getLoc();
    auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);

    // Set allocated and aligned pointers.
    Value allocatedPtr, alignedPtr;
    extractPointersAndOffset(loc, rewriter, *getTypeConverter(),
                             castOp.source(), adaptor.source(), &allocatedPtr,
                             &alignedPtr);
    desc.setAllocatedPtr(rewriter, loc, allocatedPtr);
    desc.setAlignedPtr(rewriter, loc, alignedPtr);

    // Set offset.
    if (castOp.isDynamicOffset(0))
      desc.setOffset(rewriter, loc, adaptor.offsets()[0]);
    else
      desc.setConstantOffset(rewriter, loc, castOp.getStaticOffset(0));

    // Set sizes and strides.
    unsigned dynSizeId = 0;
    unsigned dynStrideId = 0;
    for (unsigned i = 0, e = targetMemRefType.getRank(); i < e; ++i) {
      if (castOp.isDynamicSize(i))
        desc.setSize(rewriter, loc, i, adaptor.sizes()[dynSizeId++]);
      else
        desc.setConstantSize(rewriter, loc, i, castOp.getStaticSize(i));

      if (castOp.isDynamicStride(i))
        desc.setStride(rewriter, loc, i, adaptor.strides()[dynStrideId++]);
      else
        desc.setConstantStride(rewriter, loc, i, castOp.getStaticStride(i));
    }
    *descriptor = desc;
    return success();
  }
};

struct MemRefReshapeOpLowering
    : public ConvertOpToLLVMPattern<memref::ReshapeOp> {
  using ConvertOpToLLVMPattern<memref::ReshapeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::ReshapeOp reshapeOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    Type srcType = reshapeOp.source().getType();

    Value descriptor;
    if (failed(convertSourceMemRefToDescriptor(rewriter, srcType, reshapeOp,
                                               adaptor, &descriptor)))
      return failure();
    rewriter.replaceOp(reshapeOp, {descriptor});
    return success();
  }

private:
  LogicalResult
  convertSourceMemRefToDescriptor(ConversionPatternRewriter &rewriter,
                                  Type srcType, memref::ReshapeOp reshapeOp,
                                  memref::ReshapeOp::Adaptor adaptor,
                                  Value *descriptor) const {
    // Conversion for statically-known shape args is performed via
    // `memref.reinterpret_cast`.
    auto shapeMemRefType = reshapeOp.shape().getType().cast<MemRefType>();
    if (shapeMemRefType.hasStaticShape())
      return failure();

    // The shape is a rank-1 memref with unknown length.
    Location loc = reshapeOp.getLoc();
    MemRefDescriptor shapeDesc(adaptor.shape());
    Value resultRank = shapeDesc.size(rewriter, loc, 0);

    // Extract address space and element type.
    auto targetType =
        reshapeOp.getResult().getType().cast<UnrankedMemRefType>();
    unsigned addressSpace = targetType.getMemorySpaceAsInt();
    Type elementType = targetType.getElementType();

    // Create the unranked memref descriptor that holds the ranked one. The
    // inner descriptor is allocated on the stack.
    auto targetDesc = UnrankedMemRefDescriptor::undef(
        rewriter, loc, typeConverter->convertType(targetType));
    targetDesc.setRank(rewriter, loc, resultRank);
    SmallVector<Value, 4> sizes;
    UnrankedMemRefDescriptor::computeSizes(rewriter, loc, *getTypeConverter(),
                                           targetDesc, sizes);
    Value underlyingDescPtr = rewriter.create<LLVM::AllocaOp>(
        loc, getVoidPtrType(), sizes.front(), llvm::None);
    targetDesc.setMemRefDescPtr(rewriter, loc, underlyingDescPtr);

    // Extract pointers and offset from the source memref.
    Value allocatedPtr, alignedPtr, offset;
    extractPointersAndOffset(loc, rewriter, *getTypeConverter(),
                             reshapeOp.source(), adaptor.source(),
                             &allocatedPtr, &alignedPtr, &offset);

    // Set pointers and offset.
    Type llvmElementType = typeConverter->convertType(elementType);
    auto elementPtrPtrType = LLVM::LLVMPointerType::get(
        LLVM::LLVMPointerType::get(llvmElementType, addressSpace));
    UnrankedMemRefDescriptor::setAllocatedPtr(rewriter, loc, underlyingDescPtr,
                                              elementPtrPtrType, allocatedPtr);
    UnrankedMemRefDescriptor::setAlignedPtr(rewriter, loc, *getTypeConverter(),
                                            underlyingDescPtr,
                                            elementPtrPtrType, alignedPtr);
    UnrankedMemRefDescriptor::setOffset(rewriter, loc, *getTypeConverter(),
                                        underlyingDescPtr, elementPtrPtrType,
                                        offset);

    // Use the offset pointer as base for further addressing. Copy over the new
    // shape and compute strides. For this, we create a loop from rank-1 to 0.
    Value targetSizesBase = UnrankedMemRefDescriptor::sizeBasePtr(
        rewriter, loc, *getTypeConverter(), underlyingDescPtr,
        elementPtrPtrType);
    Value targetStridesBase = UnrankedMemRefDescriptor::strideBasePtr(
        rewriter, loc, *getTypeConverter(), targetSizesBase, resultRank);
    Value shapeOperandPtr = shapeDesc.alignedPtr(rewriter, loc);
    Value oneIndex = createIndexConstant(rewriter, loc, 1);
    Value resultRankMinusOne =
        rewriter.create<LLVM::SubOp>(loc, resultRank, oneIndex);

    Block *initBlock = rewriter.getInsertionBlock();
    Type indexType = getTypeConverter()->getIndexType();
    Block::iterator remainingOpsIt = std::next(rewriter.getInsertionPoint());

    Block *condBlock = rewriter.createBlock(initBlock->getParent(), {},
                                            {indexType, indexType});

    // Move the remaining initBlock ops to condBlock.
    Block *remainingBlock = rewriter.splitBlock(initBlock, remainingOpsIt);
    rewriter.mergeBlocks(remainingBlock, condBlock, ValueRange());

    rewriter.setInsertionPointToEnd(initBlock);
    rewriter.create<LLVM::BrOp>(loc, ValueRange({resultRankMinusOne, oneIndex}),
                                condBlock);
    rewriter.setInsertionPointToStart(condBlock);
    Value indexArg = condBlock->getArgument(0);
    Value strideArg = condBlock->getArgument(1);

    Value zeroIndex = createIndexConstant(rewriter, loc, 0);
    Value pred = rewriter.create<LLVM::ICmpOp>(
        loc, IntegerType::get(rewriter.getContext(), 1),
        LLVM::ICmpPredicate::sge, indexArg, zeroIndex);

    Block *bodyBlock =
        rewriter.splitBlock(condBlock, rewriter.getInsertionPoint());
    rewriter.setInsertionPointToStart(bodyBlock);

    // Copy size from shape to descriptor.
    Type llvmIndexPtrType = LLVM::LLVMPointerType::get(indexType);
    Value sizeLoadGep = rewriter.create<LLVM::GEPOp>(
        loc, llvmIndexPtrType, shapeOperandPtr, ValueRange{indexArg});
    Value size = rewriter.create<LLVM::LoadOp>(loc, sizeLoadGep);
    UnrankedMemRefDescriptor::setSize(rewriter, loc, *getTypeConverter(),
                                      targetSizesBase, indexArg, size);

    // Write stride value and compute next one.
    UnrankedMemRefDescriptor::setStride(rewriter, loc, *getTypeConverter(),
                                        targetStridesBase, indexArg, strideArg);
    Value nextStride = rewriter.create<LLVM::MulOp>(loc, strideArg, size);

    // Decrement loop counter and branch back.
    Value decrement = rewriter.create<LLVM::SubOp>(loc, indexArg, oneIndex);
    rewriter.create<LLVM::BrOp>(loc, ValueRange({decrement, nextStride}),
                                condBlock);

    Block *remainder =
        rewriter.splitBlock(bodyBlock, rewriter.getInsertionPoint());

    // Hook up the cond exit to the remainder.
    rewriter.setInsertionPointToEnd(condBlock);
    rewriter.create<LLVM::CondBrOp>(loc, pred, bodyBlock, llvm::None, remainder,
                                    llvm::None);

    // Reset position to beginning of new remainder block.
    rewriter.setInsertionPointToStart(remainder);

    *descriptor = targetDesc;
    return success();
  }
};
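// Editor's illustration (not part of the upstream file): the size/stride copy
// loop built in `convertSourceMemRefToDescriptor` above has this shape, with
// the (index, stride) pair carried as block arguments:
//   ^init:
//     llvm.br ^cond(%rankMinusOne, %c1)
//   ^cond(%idx, %stride):
//     %pred = llvm.icmp "sge" %idx, %c0
//     llvm.cond_br %pred, ^body, ^remainder
//   ^body:
//     ... store sizes[%idx] and strides[%idx] ...
//     %next = llvm.mul %stride, %size
//     %dec  = llvm.sub %idx, %c1
//     llvm.br ^cond(%dec, %next)
// i.e. strides are accumulated right to left, starting from the innermost
// dimension.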
/// Helper function to convert a vector of `OpFoldResult`s into a vector of
/// `Value`s.
static SmallVector<Value> getAsValues(OpBuilder &b, Location loc,
                                      Type &llvmIndexType,
                                      ArrayRef<OpFoldResult> valueOrAttrVec) {
  return llvm::to_vector<4>(
      llvm::map_range(valueOrAttrVec, [&](OpFoldResult value) -> Value {
        if (auto attr = value.dyn_cast<Attribute>())
          return b.create<LLVM::ConstantOp>(loc, llvmIndexType, attr);
        return value.get<Value>();
      }));
}

/// Compute a map that for a given dimension of the expanded type gives the
/// dimension in the collapsed type it maps to. Essentially, it is the inverse
/// of the `reassociation` maps.
static DenseMap<int64_t, int64_t>
getExpandedDimToCollapsedDimMap(ArrayRef<ReassociationIndices> reassociation) {
  llvm::DenseMap<int64_t, int64_t> expandedDimToCollapsedDim;
  for (auto &en : enumerate(reassociation)) {
    for (auto dim : en.value())
      expandedDimToCollapsedDim[dim] = en.index();
  }
  return expandedDimToCollapsedDim;
}

static OpFoldResult
getExpandedOutputDimSize(OpBuilder &b, Location loc, Type &llvmIndexType,
                         int64_t outDimIndex, ArrayRef<int64_t> outStaticShape,
                         MemRefDescriptor &inDesc,
                         ArrayRef<int64_t> inStaticShape,
                         ArrayRef<ReassociationIndices> reassociation,
                         DenseMap<int64_t, int64_t> &outDimToInDimMap) {
  int64_t outDimSize = outStaticShape[outDimIndex];
  if (!ShapedType::isDynamic(outDimSize))
    return b.getIndexAttr(outDimSize);

  // Calculate the product of all the other output dim sizes in this
  // reassociation group.
  int64_t inDimIndex = outDimToInDimMap[outDimIndex];
  int64_t otherDimSizesMul = 1;
  for (auto otherDimIndex : reassociation[inDimIndex]) {
    if (otherDimIndex == static_cast<unsigned>(outDimIndex))
      continue;
    int64_t otherDimSize = outStaticShape[otherDimIndex];
    assert(!ShapedType::isDynamic(otherDimSize) &&
           "single dimension cannot be expanded into multiple dynamic "
           "dimensions");
    otherDimSizesMul *= otherDimSize;
  }

  // outDimSize = inDimSize / otherDimSizesMul
  int64_t inDimSize = inStaticShape[inDimIndex];
  Value inDimSizeDynamic =
      ShapedType::isDynamic(inDimSize)
          ? inDesc.size(b, loc, inDimIndex)
          : b.create<LLVM::ConstantOp>(loc, llvmIndexType,
                                       b.getIndexAttr(inDimSize));
  Value outDimSizeDynamic = b.create<LLVM::SDivOp>(
      loc, inDimSizeDynamic,
      b.create<LLVM::ConstantOp>(loc, llvmIndexType,
                                 b.getIndexAttr(otherDimSizesMul)));
  return outDimSizeDynamic;
}

static OpFoldResult getCollapsedOutputDimSize(
    OpBuilder &b, Location loc, Type &llvmIndexType, int64_t outDimIndex,
    int64_t outDimSize, ArrayRef<int64_t> inStaticShape,
    MemRefDescriptor &inDesc, ArrayRef<ReassociationIndices> reassociation) {
  if (!ShapedType::isDynamic(outDimSize))
    return b.getIndexAttr(outDimSize);

  Value c1 = b.create<LLVM::ConstantOp>(loc, llvmIndexType, b.getIndexAttr(1));
  Value outDimSizeDynamic = c1;
  for (auto inDimIndex : reassociation[outDimIndex]) {
    int64_t inDimSize = inStaticShape[inDimIndex];
    Value inDimSizeDynamic =
        ShapedType::isDynamic(inDimSize)
            ? inDesc.size(b, loc, inDimIndex)
            : b.create<LLVM::ConstantOp>(loc, llvmIndexType,
                                         b.getIndexAttr(inDimSize));
    outDimSizeDynamic =
        b.create<LLVM::MulOp>(loc, outDimSizeDynamic, inDimSizeDynamic);
  }
  return outDimSizeDynamic;
}

static SmallVector<OpFoldResult, 4>
getCollapsedOutputShape(OpBuilder &b, Location loc, Type &llvmIndexType,
                        ArrayRef<ReassociationIndices> reassociation,
                        ArrayRef<int64_t> inStaticShape,
                        MemRefDescriptor &inDesc,
                        ArrayRef<int64_t> outStaticShape) {
  return llvm::to_vector<4>(llvm::map_range(
      llvm::seq<int64_t>(0, outStaticShape.size()), [&](int64_t outDimIndex) {
        return getCollapsedOutputDimSize(b, loc, llvmIndexType, outDimIndex,
                                         outStaticShape[outDimIndex],
                                         inStaticShape, inDesc, reassociation);
      }));
}

static SmallVector<OpFoldResult, 4>
getExpandedOutputShape(OpBuilder &b, Location loc, Type &llvmIndexType,
                       ArrayRef<ReassociationIndices> reassociation,
                       ArrayRef<int64_t> inStaticShape,
                       MemRefDescriptor &inDesc,
                       ArrayRef<int64_t> outStaticShape) {
  DenseMap<int64_t, int64_t> outDimToInDimMap =
      getExpandedDimToCollapsedDimMap(reassociation);
  return llvm::to_vector<4>(llvm::map_range(
      llvm::seq<int64_t>(0, outStaticShape.size()), [&](int64_t outDimIndex) {
        return getExpandedOutputDimSize(b, loc, llvmIndexType, outDimIndex,
                                        outStaticShape, inDesc, inStaticShape,
                                        reassociation, outDimToInDimMap);
      }));
}

static SmallVector<Value>
getDynamicOutputShape(OpBuilder &b, Location loc, Type &llvmIndexType,
                      ArrayRef<ReassociationIndices> reassociation,
                      ArrayRef<int64_t> inStaticShape, MemRefDescriptor &inDesc,
                      ArrayRef<int64_t> outStaticShape) {
  return outStaticShape.size() < inStaticShape.size()
             ? getAsValues(b, loc, llvmIndexType,
                           getCollapsedOutputShape(b, loc, llvmIndexType,
                                                   reassociation, inStaticShape,
                                                   inDesc, outStaticShape))
             : getAsValues(b, loc, llvmIndexType,
                           getExpandedOutputShape(b, loc, llvmIndexType,
                                                  reassociation, inStaticShape,
                                                  inDesc, outStaticShape));
}

// ReshapeOp creates a new view descriptor of the proper rank.
// Only memrefs with an identity layout are supported for now.
template <typename ReshapeOp>
class ReassociatingReshapeOpConversion
    : public ConvertOpToLLVMPattern<ReshapeOp> {
public:
  using ConvertOpToLLVMPattern<ReshapeOp>::ConvertOpToLLVMPattern;
  using ReshapeOpAdaptor = typename ReshapeOp::Adaptor;

  LogicalResult
  matchAndRewrite(ReshapeOp reshapeOp, typename ReshapeOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    MemRefType dstType = reshapeOp.getResultType();
    MemRefType srcType = reshapeOp.getSrcType();
    if (!srcType.getLayout().isIdentity() ||
        !dstType.getLayout().isIdentity()) {
      return rewriter.notifyMatchFailure(reshapeOp,
                                         "only empty layout map is supported");
    }

    int64_t offset;
    SmallVector<int64_t, 4> strides;
    if (failed(getStridesAndOffset(dstType, strides, offset))) {
      return rewriter.notifyMatchFailure(
          reshapeOp, "failed to get stride and offset exprs");
    }

    MemRefDescriptor srcDesc(adaptor.src());
    Location loc = reshapeOp->getLoc();
    auto dstDesc = MemRefDescriptor::undef(
        rewriter, loc, this->typeConverter->convertType(dstType));
    dstDesc.setAllocatedPtr(rewriter, loc, srcDesc.allocatedPtr(rewriter, loc));
    dstDesc.setAlignedPtr(rewriter, loc, srcDesc.alignedPtr(rewriter, loc));
    dstDesc.setOffset(rewriter, loc, srcDesc.offset(rewriter, loc));

    ArrayRef<int64_t> srcStaticShape = srcType.getShape();
    ArrayRef<int64_t> dstStaticShape = dstType.getShape();
    Type llvmIndexType =
        this->typeConverter->convertType(rewriter.getIndexType());
    SmallVector<Value> dstShape = getDynamicOutputShape(
        rewriter, loc, llvmIndexType, reshapeOp.getReassociationIndices(),
        srcStaticShape, srcDesc, dstStaticShape);
    for (auto &en : llvm::enumerate(dstShape))
      dstDesc.setSize(rewriter, loc, en.index(), en.value());

    auto isStaticStride = [](int64_t stride) {
      return !ShapedType::isDynamicStrideOrOffset(stride);
    };
    if (llvm::all_of(strides, isStaticStride)) {
      for (auto &en : llvm::enumerate(strides))
        dstDesc.setConstantStride(rewriter, loc, en.index(), en.value());
    } else {
      Value c1 = rewriter.create<LLVM::ConstantOp>(loc, llvmIndexType,
                                                   rewriter.getIndexAttr(1));
      Value stride = c1;
      for (auto dimIndex :
           llvm::reverse(llvm::seq<int64_t>(0, dstShape.size()))) {
        dstDesc.setStride(rewriter, loc, dimIndex, stride);
        stride = rewriter.create<LLVM::MulOp>(loc, dstShape[dimIndex], stride);
      }
    }
    rewriter.replaceOp(reshapeOp, {dstDesc});
    return success();
  }
};
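// Editor's illustration (not part of the upstream file): for
//   %0 = memref.collapse_shape %arg [[0, 1]]
//        : memref<?x4xf32> into memref<?xf32>
// the collapsed output size is the product of the grouped input sizes
// (size(0) * 4 here), while for the inverse expand_shape the single dynamic
// output size in a group is recovered by dividing the input size by the
// product of the group's static output sizes, as computed by the helpers
// above.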
/// Conversion pattern that transforms a subview op into:
/// 1. An `llvm.mlir.undef` operation to create a memref descriptor
/// 2. Updates to the descriptor to introduce the data ptr, offset, size
/// and stride.
/// The subview op is replaced by the descriptor.
struct SubViewOpLowering : public ConvertOpToLLVMPattern<memref::SubViewOp> {
  using ConvertOpToLLVMPattern<memref::SubViewOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::SubViewOp subViewOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = subViewOp.getLoc();

    auto sourceMemRefType = subViewOp.source().getType().cast<MemRefType>();
    auto sourceElementTy =
        typeConverter->convertType(sourceMemRefType.getElementType());

    auto viewMemRefType = subViewOp.getType();
    auto inferredType =
        memref::SubViewOp::inferResultType(
            subViewOp.getSourceType(),
            extractFromI64ArrayAttr(subViewOp.static_offsets()),
            extractFromI64ArrayAttr(subViewOp.static_sizes()),
            extractFromI64ArrayAttr(subViewOp.static_strides()))
            .cast<MemRefType>();
    auto targetElementTy =
        typeConverter->convertType(viewMemRefType.getElementType());
    auto targetDescTy = typeConverter->convertType(viewMemRefType);
    if (!sourceElementTy || !targetDescTy || !targetElementTy ||
        !LLVM::isCompatibleType(sourceElementTy) ||
        !LLVM::isCompatibleType(targetElementTy) ||
        !LLVM::isCompatibleType(targetDescTy))
      return failure();

    // Extract the offset and strides from the type.
    int64_t offset;
    SmallVector<int64_t, 4> strides;
    auto successStrides = getStridesAndOffset(inferredType, strides, offset);
    if (failed(successStrides))
      return failure();

    // Create the descriptor.
    if (!LLVM::isCompatibleType(adaptor.getOperands().front().getType()))
      return failure();
    MemRefDescriptor sourceMemRef(adaptor.getOperands().front());
    auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy);

    // Copy the buffer pointer from the old descriptor to the new one.
    Value extracted = sourceMemRef.allocatedPtr(rewriter, loc);
    Value bitcastPtr = rewriter.create<LLVM::BitcastOp>(
        loc,
        LLVM::LLVMPointerType::get(targetElementTy,
                                   viewMemRefType.getMemorySpaceAsInt()),
        extracted);
    targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr);

    // Copy the aligned pointer from the old descriptor to the new one.
    extracted = sourceMemRef.alignedPtr(rewriter, loc);
    bitcastPtr = rewriter.create<LLVM::BitcastOp>(
        loc,
        LLVM::LLVMPointerType::get(targetElementTy,
                                   viewMemRefType.getMemorySpaceAsInt()),
        extracted);
    targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr);

    size_t inferredShapeRank = inferredType.getRank();
    size_t resultShapeRank = viewMemRefType.getRank();

    // Extract strides needed to compute offset.
    SmallVector<Value, 4> strideValues;
    strideValues.reserve(inferredShapeRank);
    for (unsigned i = 0; i < inferredShapeRank; ++i)
      strideValues.push_back(sourceMemRef.stride(rewriter, loc, i));

    // Offset.
    auto llvmIndexType = typeConverter->convertType(rewriter.getIndexType());
    if (!ShapedType::isDynamicStrideOrOffset(offset)) {
      targetMemRef.setConstantOffset(rewriter, loc, offset);
    } else {
      Value baseOffset = sourceMemRef.offset(rewriter, loc);
      // `inferredShapeRank` may be larger than the number of offset operands
      // because of trailing semantics. In this case, the offset is guaranteed
      // to be interpreted as 0 and we can just skip the extra dimensions.
      for (unsigned i = 0, e = std::min(inferredShapeRank,
                                        subViewOp.getMixedOffsets().size());
           i < e; ++i) {
        Value offset =
            // TODO: need OpFoldResult ODS adaptor to clean this up.
            subViewOp.isDynamicOffset(i)
                ? adaptor.getOperands()[subViewOp.getIndexOfDynamicOffset(i)]
                : rewriter.create<LLVM::ConstantOp>(
                      loc, llvmIndexType,
                      rewriter.getI64IntegerAttr(subViewOp.getStaticOffset(i)));
        Value mul = rewriter.create<LLVM::MulOp>(loc, offset, strideValues[i]);
        baseOffset = rewriter.create<LLVM::AddOp>(loc, baseOffset, mul);
      }
      targetMemRef.setOffset(rewriter, loc, baseOffset);
    }

    // Update sizes and strides.
    SmallVector<OpFoldResult> mixedSizes = subViewOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = subViewOp.getMixedStrides();
    assert(mixedSizes.size() == mixedStrides.size() &&
           "expected sizes and strides of equal length");
    llvm::SmallDenseSet<unsigned> unusedDims = subViewOp.getDroppedDims();
    for (int i = inferredShapeRank - 1, j = resultShapeRank - 1;
         i >= 0 && j >= 0; --i) {
      if (unusedDims.contains(i))
        continue;

      // `i` may overflow the bounds of subViewOp.getMixedSizes() because of
      // trailing semantics. In this case, the size is guaranteed to be
      // interpreted as Dim and the stride as 1.
      Value size, stride;
      if (static_cast<unsigned>(i) >= mixedSizes.size()) {
        // If the static size is available, use it directly. This is similar to
        // the folding of dim(constant-op) but removes the need for dim to be
        // aware of LLVM constants and for this pass to be aware of std
        // constants.
        int64_t staticSize =
            subViewOp.source().getType().cast<MemRefType>().getShape()[i];
        if (staticSize != ShapedType::kDynamicSize) {
          size = rewriter.create<LLVM::ConstantOp>(
              loc, llvmIndexType, rewriter.getI64IntegerAttr(staticSize));
        } else {
          Value pos = rewriter.create<LLVM::ConstantOp>(
              loc, llvmIndexType, rewriter.getI64IntegerAttr(i));
          Value dim =
              rewriter.create<memref::DimOp>(loc, subViewOp.source(), pos);
          auto cast = rewriter.create<UnrealizedConversionCastOp>(
              loc, llvmIndexType, dim);
          size = cast.getResult(0);
        }
        stride = rewriter.create<LLVM::ConstantOp>(
            loc, llvmIndexType, rewriter.getI64IntegerAttr(1));
      } else {
        // TODO: need OpFoldResult ODS adaptor to clean this up.
        size =
            subViewOp.isDynamicSize(i)
                ? adaptor.getOperands()[subViewOp.getIndexOfDynamicSize(i)]
                : rewriter.create<LLVM::ConstantOp>(
                      loc, llvmIndexType,
                      rewriter.getI64IntegerAttr(subViewOp.getStaticSize(i)));
        if (!ShapedType::isDynamicStrideOrOffset(strides[i])) {
          stride = rewriter.create<LLVM::ConstantOp>(
              loc, llvmIndexType, rewriter.getI64IntegerAttr(strides[i]));
        } else {
          stride =
              subViewOp.isDynamicStride(i)
                  ? adaptor.getOperands()[subViewOp.getIndexOfDynamicStride(i)]
                  : rewriter.create<LLVM::ConstantOp>(
                        loc, llvmIndexType,
                        rewriter.getI64IntegerAttr(
                            subViewOp.getStaticStride(i)));
          stride = rewriter.create<LLVM::MulOp>(loc, stride, strideValues[i]);
        }
      }
      targetMemRef.setSize(rewriter, loc, j, size);
      targetMemRef.setStride(rewriter, loc, j, stride);
      j--;
    }

    rewriter.replaceOp(subViewOp, {targetMemRef});
    return success();
  }
};
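// Editor's illustration (not part of the upstream file): for
//   %0 = memref.subview %src[%o0, %o1][4, 4][1, 1]
//        : memref<8x8xf32> to memref<4x4xf32, offset: ?, strides: [8, 1]>
// no data is moved; the pattern above only emits descriptor updates, roughly:
//   offset  = %o0 * 8 + %o1 * 1   // dynamic offsets times source strides
//   sizes   = [4, 4]
//   strides = [8, 1]              // source strides scaled by the steps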
class TransposeOpLowering : public ConvertOpToLLVMPattern<memref::TransposeOp> {
public:
  using ConvertOpToLLVMPattern<memref::TransposeOp>::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(memref::TransposeOp transposeOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = transposeOp.getLoc();
    MemRefDescriptor viewMemRef(adaptor.in());

    // No permutation, early exit.
    if (transposeOp.permutation().isIdentity())
      return rewriter.replaceOp(transposeOp, {viewMemRef}), success();

    auto targetMemRef = MemRefDescriptor::undef(
        rewriter, loc, typeConverter->convertType(transposeOp.getShapedType()));

    // Copy the base and aligned pointers from the old descriptor to the new
    // one.
    targetMemRef.setAllocatedPtr(rewriter, loc,
                                 viewMemRef.allocatedPtr(rewriter, loc));
    targetMemRef.setAlignedPtr(rewriter, loc,
                               viewMemRef.alignedPtr(rewriter, loc));

    // Copy the offset from the old descriptor to the new one.
    targetMemRef.setOffset(rewriter, loc, viewMemRef.offset(rewriter, loc));

    // Iterate over the dimensions and apply the size/stride permutation.
    for (auto en : llvm::enumerate(transposeOp.permutation().getResults())) {
      int sourcePos = en.index();
      int targetPos = en.value().cast<AffineDimExpr>().getPosition();
      targetMemRef.setSize(rewriter, loc, targetPos,
                           viewMemRef.size(rewriter, loc, sourcePos));
      targetMemRef.setStride(rewriter, loc, targetPos,
                             viewMemRef.stride(rewriter, loc, sourcePos));
    }

    rewriter.replaceOp(transposeOp, {targetMemRef});
    return success();
  }
};

/// Conversion pattern that transforms an op into:
/// 1. An `llvm.mlir.undef` operation to create a memref descriptor.
/// 2. Updates to the descriptor to introduce the data ptr, offset, size
///    and stride.
/// The view op is replaced by the descriptor.
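/// For instance (an illustrative sketch only), a view such as
///   %1 = memref.view %0[%shift][%sz]
///       : memref<2048xi8> to memref<?x128xf32>
/// produces a descriptor whose aligned pointer is %0's aligned pointer
/// advanced by %shift bytes and bitcast to the f32 element pointer type,
/// with offset 0, sizes {%sz, 128}, and strides {128, 1}.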
struct ViewOpLowering : public ConvertOpToLLVMPattern<memref::ViewOp> {
  using ConvertOpToLLVMPattern<memref::ViewOp>::ConvertOpToLLVMPattern;

  // Build and return the value for the idx^th shape dimension, either by
  // returning the constant shape dimension or by looking up the proper
  // dynamic size operand: the number of dynamic dimensions preceding `idx`
  // gives its position in `dynamicSizes`.
  Value getSize(ConversionPatternRewriter &rewriter, Location loc,
                ArrayRef<int64_t> shape, ValueRange dynamicSizes,
                unsigned idx) const {
    assert(idx < shape.size());
    if (!ShapedType::isDynamic(shape[idx]))
      return createIndexConstant(rewriter, loc, shape[idx]);
    // Count the number of dynamic dims in the range [0, idx).
    unsigned nDynamic = llvm::count_if(shape.take_front(idx), [](int64_t v) {
      return ShapedType::isDynamic(v);
    });
    return dynamicSizes[nDynamic];
  }

  // Build and return the idx^th stride, either by returning the constant
  // stride or by computing the dynamic stride from the current
  // `runningStride` and `nextSize`. The caller should keep a running stride
  // and update it with the result returned by this function.
  Value getStride(ConversionPatternRewriter &rewriter, Location loc,
                  ArrayRef<int64_t> strides, Value nextSize,
                  Value runningStride, unsigned idx) const {
    assert(idx < strides.size());
    if (!MemRefType::isDynamicStrideOrOffset(strides[idx]))
      return createIndexConstant(rewriter, loc, strides[idx]);
    if (nextSize)
      return runningStride
                 ? rewriter.create<LLVM::MulOp>(loc, runningStride, nextSize)
                 : nextSize;
    assert(!runningStride);
    return createIndexConstant(rewriter, loc, 1);
  }
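
  // Worked example (illustrative): for a contiguous memref<?x?x?xf32> the
  // loop in matchAndRewrite below walks dimensions from the innermost out,
  // producing stride2 = 1, stride1 = stride2 * size2, and
  // stride0 = stride1 * size1, i.e. each stride is the product of the sizes
  // of all more-minor dimensions.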

  LogicalResult
  matchAndRewrite(memref::ViewOp viewOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto loc = viewOp.getLoc();

    auto viewMemRefType = viewOp.getType();
    auto targetElementTy =
        typeConverter->convertType(viewMemRefType.getElementType());
    auto targetDescTy = typeConverter->convertType(viewMemRefType);
    if (!targetDescTy || !targetElementTy ||
        !LLVM::isCompatibleType(targetElementTy) ||
        !LLVM::isCompatibleType(targetDescTy))
      return viewOp.emitWarning("target descriptor type not converted to LLVM"),
             failure();

    int64_t offset;
    SmallVector<int64_t, 4> strides;
    auto successStrides = getStridesAndOffset(viewMemRefType, strides, offset);
    if (failed(successStrides))
      return viewOp.emitWarning("cannot cast to non-strided shape"), failure();
    assert(offset == 0 && "expected offset to be 0");

    // Create the descriptor.
    MemRefDescriptor sourceMemRef(adaptor.source());
    auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy);

    // Field 1: Copy the allocated pointer, used for malloc/free.
    Value allocatedPtr = sourceMemRef.allocatedPtr(rewriter, loc);
    auto srcMemRefType = viewOp.source().getType().cast<MemRefType>();
    Value bitcastPtr = rewriter.create<LLVM::BitcastOp>(
        loc,
        LLVM::LLVMPointerType::get(targetElementTy,
                                   srcMemRefType.getMemorySpaceAsInt()),
        allocatedPtr);
    targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr);

    // Field 2: Copy the aligned pointer of the payload, shifted by
    // `byte_shift`.
    Value alignedPtr = sourceMemRef.alignedPtr(rewriter, loc);
    alignedPtr = rewriter.create<LLVM::GEPOp>(loc, alignedPtr.getType(),
                                              alignedPtr, adaptor.byte_shift());
    bitcastPtr = rewriter.create<LLVM::BitcastOp>(
        loc,
        LLVM::LLVMPointerType::get(targetElementTy,
                                   srcMemRefType.getMemorySpaceAsInt()),
        alignedPtr);
    targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr);

    // Field 3: The offset in the resulting type must be 0. This is because of
    // the type change: an offset on srcType* may not be expressible as an
    // offset on dstType*.
    targetMemRef.setOffset(rewriter, loc,
                           createIndexConstant(rewriter, loc, offset));

    // Early exit for the 0-D corner case.
    if (viewMemRefType.getRank() == 0)
      return rewriter.replaceOp(viewOp, {targetMemRef}), success();

    // Fields 4 and 5: Update sizes and strides.
    if (strides.back() != 1)
      return viewOp.emitWarning("cannot cast to non-contiguous shape"),
             failure();
    Value stride = nullptr, nextSize = nullptr;
    for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) {
      // Update size.
      Value size =
          getSize(rewriter, loc, viewMemRefType.getShape(), adaptor.sizes(), i);
      targetMemRef.setSize(rewriter, loc, i, size);
      // Update stride.
      stride = getStride(rewriter, loc, strides, nextSize, stride, i);
      targetMemRef.setStride(rewriter, loc, i, stride);
      nextSize = size;
    }

    rewriter.replaceOp(viewOp, {targetMemRef});
    return success();
  }
};

} // namespace

void mlir::populateMemRefToLLVMConversionPatterns(LLVMTypeConverter &converter,
                                                  RewritePatternSet &patterns) {
  // clang-format off
  patterns.add<
      AllocaOpLowering,
      AllocaScopeOpLowering,
      AssumeAlignmentOpLowering,
      DimOpLowering,
      GlobalMemrefOpLowering,
      GetGlobalMemrefOpLowering,
      LoadOpLowering,
      MemRefCastOpLowering,
      MemRefCopyOpLowering,
      MemRefReinterpretCastOpLowering,
      MemRefReshapeOpLowering,
      PrefetchOpLowering,
      ReassociatingReshapeOpConversion<memref::ExpandShapeOp>,
      ReassociatingReshapeOpConversion<memref::CollapseShapeOp>,
      StoreOpLowering,
      SubViewOpLowering,
      TransposeOpLowering,
      ViewOpLowering>(converter);
  // clang-format on
  auto allocLowering = converter.getOptions().allocLowering;
  if (allocLowering == LowerToLLVMOptions::AllocLowering::AlignedAlloc)
    patterns.add<AlignedAllocOpLowering, DeallocOpLowering>(converter);
  else if (allocLowering == LowerToLLVMOptions::AllocLowering::Malloc)
    patterns.add<AllocOpLowering, DeallocOpLowering>(converter);
}

namespace {
struct MemRefToLLVMPass : public ConvertMemRefToLLVMBase<MemRefToLLVMPass> {
  MemRefToLLVMPass() = default;

  void runOnOperation() override {
    Operation *op = getOperation();
    const auto &dataLayoutAnalysis = getAnalysis<DataLayoutAnalysis>();
    LowerToLLVMOptions options(&getContext(),
                               dataLayoutAnalysis.getAtOrAbove(op));
    options.allocLowering =
        (useAlignedAlloc ? LowerToLLVMOptions::AllocLowering::AlignedAlloc
                         : LowerToLLVMOptions::AllocLowering::Malloc);
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);

    LLVMTypeConverter typeConverter(&getContext(), options,
                                    &dataLayoutAnalysis);
    RewritePatternSet patterns(&getContext());
    populateMemRefToLLVMConversionPatterns(typeConverter, patterns);
    LLVMConversionTarget target(getContext());
    target.addLegalOp<FuncOp>();
    if (failed(applyPartialConversion(op, target, std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createMemRefToLLVMPass() {
  return std::make_unique<MemRefToLLVMPass>();
}
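
// Usage sketch (illustrative, not part of the conversion itself): the pass
// created above is typically scheduled through a pass manager, e.g.
//   PassManager pm(module.getContext());
//   pm.addPass(createMemRefToLLVMPass());
//   if (failed(pm.run(module)))
//     /* handle failure */;
// or driven from the command line via `mlir-opt --convert-memref-to-llvm`.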