//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

using namespace mlir;

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
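  /// The `boxValue` index is one of the k*PosInBox field positions used
  /// throughout this file; those positions are not declared here, and the
  /// descriptor layout they index into is assumed to follow flang's
  /// descriptor model (a CFI_cdesc_t-style struct plus an optional addendum).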
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
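  /// A minimal sketch of the emitted LLVM dialect ops (types and exact
  /// constants elided):
  ///   %c0  = llvm.mlir.constant(0 : i32)
  ///   %pos = llvm.mlir.constant(<kAddrPosInBox> : i32)
  ///   %p   = llvm.getelementptr %box[%c0, %pos]
  ///   %a   = llvm.load %p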
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against \p maskValue.
  // The final comparison is implemented as `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    SmallVector<mlir::Value> cv{args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};

// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name);
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
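        // Call the derived type's memory-size function (looked up above) with
        // the LEN type parameters; its result becomes the allocation size,
        // and the alloca is emitted as raw i8 storage.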
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      // We only do this for arrays that don't have a constant interior, since
      // those are the only ones that get decayed to a pointer to the element
      // type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        if (!seqTy.hasConstantInterior()) {
          fir::SequenceType::Extent constSize = 1;
          for (auto extent : seqTy.getShape())
            if (extent != fir::SequenceType::getUnknownExtent())
              constSize *= extent;
          mlir::Value constVal{
              genConstantIndex(loc, ity, rewriter, constSize).getResult()};
          size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
        }
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
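/// This amounts to an `llvm.extractvalue` of field 1 of the (buffer, length)
/// pair, followed by an integer cast to the requested result type.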
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
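/// The rank field of the descriptor is loaded and compared against zero;
/// a rank of zero denotes a scalar.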
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
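/// A scalar StringAttr is emitted directly as an `llvm.mlir.constant`; an
/// ArrayAttr of character codes is materialized as a dense vector constant
/// whose element width matches the character kind.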
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return success();
    }

    auto arr = attr.cast<mlir::ArrayAttr>();
    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    auto attrs = llvm::map_range(
        arr.getValue(), [intTy, bits](mlir::Attribute attr) -> Attribute {
          return mlir::IntegerAttr::get(
              intTy,
              attr.cast<mlir::IntegerAttr>().getValue().sextOrTrunc(bits));
        });
    mlir::Type vecType = mlir::VectorType::get(arr.size(), intTy);
    auto denseAttr = mlir::DenseElementsAttr::get(
        vecType.cast<mlir::ShapedType>(), llvm::to_vector<8>(attrs));
    rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(constop, ty,
                                                         denseAttr);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
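///
/// The lowering compares the real parts and the imaginary parts separately
/// with `llvm.fcmp` and combines the two results: AND for .EQ., OR for .NE.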
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos0),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[0], pos1),
                                   rewriter.create<mlir::LLVM::ExtractValueOp>(
                                       loc, eleTy, operands[1], pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    SmallVector<mlir::Value, 2> cp{rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return success();
  }

  inline APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
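    // Dispatch on the (from, to) type pair: identical lowered types forward
    // the operand, complex values convert both parts, logical <-> i1 follows
    // the F77 convention below, and the remaining cases pick the matching
    // LLVM cast (fptrunc/fpext, fptosi/sitofp, trunc/sext, inttoptr/ptrtoint,
    // bitcast).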
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    MLIRContext *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
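/// This uses the null-pointer GEP idiom: index 1 off a null pointer of the
/// derived type, then ptrtoint the result, yielding the element size without
/// consulting a target data layout here.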
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args{one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto heapTy = heap.getType();
    auto ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    auto dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    // !fir.array<NxMx!fir.char<K,?>> sets `size` to the width of !fir.char<K>.
    // So multiply the constant dimensions here.
    if (fir::hasDynamicSize(dataTy))
      if (auto seqTy = dataTy.dyn_cast<fir::SequenceType>())
        if (fir::characterWithDynamicLen(seqTy.getEleTy())) {
          fir::SequenceType::Extent arrSize = 1;
          for (auto d : seqTy.getShape())
            if (d != fir::SequenceType::getUnknownExtent())
              arrSize *= d;
          size = rewriter.create<mlir::LLVM::MulOp>(
              loc, ity, size, genConstantIndex(loc, ity, rewriter, arrSize));
        }
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
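/// If no `free` symbol exists yet in the module, a `void free(i8*)`
/// declaration is created.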
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return success();
  }
};
} // namespace

namespace {} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is a LLVMFuncOp.
  // 2. The first ancestor that is a LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
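  // The alloca is emitted at the start of the entry block of the enclosing
  // LLVMFuncOp (saving and restoring the insertion point), the usual way to
  // keep stack allocations out of loops.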
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return unwrapIfDerived(boxTy) != nullptr;
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8);
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          ArrayRef<unsigned> fldIndexes, mlir::Value value,
                          bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created by
  /// lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
1303 if (!this->options.ignoreMissingTypeDescriptors && 1304 !fir::NameUniquer::belongsToModule( 1305 name, Fortran::semantics::typeInfoBuiltinModule)) 1306 fir::emitFatalError( 1307 loc, "runtime derived type info descriptor was not generated"); 1308 return rewriter.create<mlir::LLVM::NullOp>( 1309 loc, ::getVoidPtrType(box.getContext())); 1310 } 1311 1312 template <typename BOX> 1313 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1314 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1315 unsigned rank, mlir::ValueRange lenParams) const { 1316 auto loc = box.getLoc(); 1317 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1318 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1319 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1320 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1321 mlir::Value descriptor = 1322 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1323 1324 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1325 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1326 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1327 typeparams.push_back(box.substr()[1]); 1328 } 1329 1330 // Write each of the fields with the appropriate values 1331 auto [eleSize, cfiTy] = 1332 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1333 descriptor = 1334 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1335 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1336 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1337 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1338 this->genI32Constant(loc, rewriter, rank)); 1339 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1340 descriptor = 1341 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1342 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1343 const bool hasAddendum = isDerivedType(boxTy); 1344 descriptor = 1345 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1346 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1347 1348 if (hasAddendum) { 1349 auto isArray = 1350 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1351 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1352 auto typeDesc = 1353 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1354 descriptor = 1355 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1356 /*bitCast=*/true); 1357 } 1358 1359 return {boxTy, descriptor, eleSize}; 1360 } 1361 1362 /// Compute the base address of a substring given the base address of a scalar 1363 /// string and the zero based string lower bound. 1364 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1365 mlir::Location loc, mlir::Value base, 1366 mlir::Value lowerBound) const { 1367 llvm::SmallVector<mlir::Value> gepOperands; 1368 auto baseType = 1369 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1370 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1371 auto idxTy = this->lowerTy().indexType(); 1372 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1373 gepOperands.push_back(zero); 1374 } 1375 gepOperands.push_back(lowerBound); 1376 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1377 } 1378 1379 /// If the embox is not in a globalOp body, allocate storage for the box; 1380 /// store the value inside and return the generated alloca. 
Return the input
1381 /// value otherwise.
1382 mlir::Value
1383 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1384 mlir::Location loc, mlir::Value boxValue) const {
1385 auto *thisBlock = rewriter.getInsertionBlock();
1386 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1387 return boxValue;
1388 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1389 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1390 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1391 return alloca;
1392 }
1393 };
1394
1395 /// Compute the extent of a triplet slice (lb:ub:step).
1396 static mlir::Value
1397 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1398 mlir::Location loc, mlir::Value lb, mlir::Value ub,
1399 mlir::Value step, mlir::Value zero, mlir::Type type) {
1400 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1401 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1402 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1403 // If the resulting extent is negative (`ub-lb` and `step` have different
1404 // signs), zero must be returned instead.
1405 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1406 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1407 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1408 }
1409
1410 /// Create a generic box on a memory reference. This conversion lowers the
1411 /// abstract box to the appropriate, initialized descriptor.
1412 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
1413 using EmboxCommonConversion::EmboxCommonConversion;
1414
1415 mlir::LogicalResult
1416 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
1417 mlir::ConversionPatternRewriter &rewriter) const override {
1418 assert(!embox.getShape() && "There should be no dims on this embox op");
1419 auto [boxTy, dest, eleSize] =
1420 consDescriptorPrefix(embox, rewriter, /*rank=*/0,
1421 /*lenParams=*/adaptor.getOperands().drop_front(1));
1422 dest = insertBaseAddress(rewriter, embox.getLoc(), dest,
1423 adaptor.getOperands()[0]);
1424 if (isDerivedTypeWithLenParams(boxTy)) {
1425 TODO(embox.getLoc(),
1426 "fir.embox codegen of derived with length parameters");
1427 return failure();
1428 }
1429 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
1430 rewriter.replaceOp(embox, result);
1431 return success();
1432 }
1433 };
1434
1435 /// Create a generic box on a memory reference.
1436 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1437 using EmboxCommonConversion::EmboxCommonConversion; 1438 1439 mlir::LogicalResult 1440 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1441 mlir::ConversionPatternRewriter &rewriter) const override { 1442 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1443 xbox, rewriter, xbox.getOutRank(), 1444 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1445 // Generate the triples in the dims field of the descriptor 1446 mlir::ValueRange operands = adaptor.getOperands(); 1447 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1448 mlir::Value base = operands[0]; 1449 assert(!xbox.shape().empty() && "must have a shape"); 1450 unsigned shapeOffset = xbox.shapeOffset(); 1451 bool hasShift = !xbox.shift().empty(); 1452 unsigned shiftOffset = xbox.shiftOffset(); 1453 bool hasSlice = !xbox.slice().empty(); 1454 unsigned sliceOffset = xbox.sliceOffset(); 1455 mlir::Location loc = xbox.getLoc(); 1456 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1457 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1458 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1459 mlir::Value prevPtrOff = one; 1460 mlir::Type eleTy = boxTy.getEleTy(); 1461 const unsigned rank = xbox.getRank(); 1462 llvm::SmallVector<mlir::Value> gepArgs; 1463 unsigned constRows = 0; 1464 mlir::Value ptrOffset = zero; 1465 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1466 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1467 mlir::Type seqEleTy = seqTy.getEleTy(); 1468 // Adjust the element scaling factor if the element is a dependent type. 1469 if (fir::hasDynamicSize(seqEleTy)) { 1470 if (fir::isa_char(seqEleTy)) { 1471 assert(xbox.lenParams().size() == 1); 1472 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1473 operands[xbox.lenParamOffset()]); 1474 } else if (seqEleTy.isa<fir::RecordType>()) { 1475 TODO(loc, "generate call to calculate size of PDT"); 1476 } else { 1477 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1478 } 1479 } else { 1480 constRows = seqTy.getConstantRows(); 1481 } 1482 } 1483 1484 bool hasSubcomp = !xbox.subcomponent().empty(); 1485 mlir::Value stepExpr; 1486 if (hasSubcomp) { 1487 // We have a subcomponent. The step value needs to be the number of 1488 // bytes per element (which is a derived type). 1489 mlir::Type ty0 = base.getType(); 1490 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1491 assert(ptrTy && "expected pointer type"); 1492 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1493 assert(memEleTy && "expected fir pointer type"); 1494 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1495 assert(seqTy && "expected sequence type"); 1496 mlir::Type seqEleTy = seqTy.getEleTy(); 1497 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1498 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1499 } 1500 1501 // Process the array subspace arguments (shape, shift, etc.), if any, 1502 // translating everything to values in the descriptor wherever the entity 1503 // has a dynamic array dimension. 
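// Illustrative sketch (added note, not from the original lowering): for a
// triplet slice such as a(2:10:3) on a contiguous i32 array, the loop below
// would conceptually write a dim entry of
//   {lower bound = 0 for a normalized descriptor (or 1 / the shift value),
//    extent      = (10 - 2 + 3) / 3 = 3,
//    stride      = 4 bytes * 3 = 12 bytes}
// i.e. the byte stride is the element size scaled by the slice step.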
1504 for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
1505 mlir::Value extent = operands[shapeOffset];
1506 mlir::Value outerExtent = extent;
1507 bool skipNext = false;
1508 if (hasSlice) {
1509 mlir::Value off = operands[sliceOffset];
1510 mlir::Value adj = one;
1511 if (hasShift)
1512 adj = operands[shiftOffset];
1513 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
1514 if (constRows > 0) {
1515 gepArgs.push_back(ao);
1516 --constRows;
1517 } else {
1518 auto dimOff =
1519 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
1520 ptrOffset =
1521 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
1522 }
1523 if (mlir::isa_and_nonnull<fir::UndefOp>(
1524 xbox.slice()[3 * di + 1].getDefiningOp())) {
1525 // This dimension contains a scalar expression in the array slice op.
1526 // The dimension is loop invariant, will be dropped, and will not
1527 // appear in the descriptor.
1528 skipNext = true;
1529 }
1530 }
1531 if (!skipNext) {
1532 if (hasSlice)
1533 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
1534 operands[sliceOffset + 1],
1535 operands[sliceOffset + 2], zero, i64Ty);
1536 // store lower bound (normally 0) for BIND(C) interoperability.
1537 mlir::Value lb = zero;
1538 const bool isaPointerOrAllocatable =
1539 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
1540 // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and
1541 // denormalized descriptors.
1542 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) {
1543 lb = one;
1544 // If there is a shifted origin and this is not a normalized
1545 // descriptor then use the value from the shift op as the lower bound.
1546 if (hasShift) {
1547 lb = operands[shiftOffset];
1548 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
1549 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
1550 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
1551 lb);
1552 }
1553 }
1554 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);
1555
1556 dest = insertExtent(rewriter, loc, dest, descIdx, extent);
1557
1558 // store step (scaled by shaped extent)
1559
1560 mlir::Value step = hasSubcomp ? stepExpr : prevDim;
1561 if (hasSlice)
1562 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
1563 operands[sliceOffset + 2]);
1564 dest = insertStride(rewriter, loc, dest, descIdx, step);
1565 ++descIdx;
1566 }
1567
1568 // compute the stride and offset for the next natural dimension
1569 prevDim =
1570 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
1571 if (constRows == 0)
1572 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
1573 outerExtent);
1574
1575 // increment iterators
1576 ++shapeOffset;
1577 if (hasShift)
1578 ++shiftOffset;
1579 if (hasSlice)
1580 sliceOffset += 3;
1581 }
1582 if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
1583 llvm::SmallVector<mlir::Value> args = {ptrOffset};
1584 args.append(gepArgs.rbegin(), gepArgs.rend());
1585 if (hasSubcomp) {
1586 // For each field in the path add the offset to base via the args list.
1587 // In the most general case, some offsets must be computed since
1588 // they are not known until runtime.
1589 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1590 fir::unwrapPassByRefType(xbox.memref().getType())))) 1591 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1592 args.append(operands.begin() + xbox.subcomponentOffset(), 1593 operands.begin() + xbox.subcomponentOffset() + 1594 xbox.subcomponent().size()); 1595 } 1596 base = 1597 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1598 if (!xbox.substr().empty()) 1599 base = shiftSubstringBase(rewriter, loc, base, 1600 operands[xbox.substrOffset()]); 1601 } 1602 dest = insertBaseAddress(rewriter, loc, dest, base); 1603 if (isDerivedTypeWithLenParams(boxTy)) 1604 TODO(loc, "fir.embox codegen of derived with length parameters"); 1605 1606 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1607 rewriter.replaceOp(xbox, result); 1608 return success(); 1609 } 1610 1611 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1612 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1613 /// zero origin lower bound for interoperability with BIND(C). 1614 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1615 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1616 } 1617 }; 1618 1619 /// Create a new box given a box reference. 1620 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1621 using EmboxCommonConversion::EmboxCommonConversion; 1622 1623 mlir::LogicalResult 1624 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1625 mlir::ConversionPatternRewriter &rewriter) const override { 1626 mlir::Location loc = rebox.getLoc(); 1627 mlir::Type idxTy = lowerTy().indexType(); 1628 mlir::Value loweredBox = adaptor.getOperands()[0]; 1629 mlir::ValueRange operands = adaptor.getOperands(); 1630 1631 // Create new descriptor and fill its non-shape related data. 
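// Illustrative note (assumption, not from the original source): the element
// size loaded from the input box is in bytes, so for a character(kind=2,len=n)
// input the length parameter is recovered below as len = elem_size / 2.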
1632 llvm::SmallVector<mlir::Value, 2> lenParams; 1633 mlir::Type inputEleTy = getInputEleTy(rebox); 1634 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1635 mlir::Value len = 1636 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1637 if (charTy.getFKind() != 1) { 1638 mlir::Value width = 1639 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1640 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1641 } 1642 lenParams.emplace_back(len); 1643 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1644 if (recTy.getNumLenParams() != 0) 1645 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1646 } 1647 auto [boxTy, dest, eleSize] = 1648 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1649 1650 // Read input extents, strides, and base address 1651 llvm::SmallVector<mlir::Value> inputExtents; 1652 llvm::SmallVector<mlir::Value> inputStrides; 1653 const unsigned inputRank = rebox.getRank(); 1654 for (unsigned i = 0; i < inputRank; ++i) { 1655 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1656 SmallVector<mlir::Value, 3> dimInfo = 1657 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1658 inputExtents.emplace_back(dimInfo[1]); 1659 inputStrides.emplace_back(dimInfo[2]); 1660 } 1661 1662 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1663 mlir::Value baseAddr = 1664 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1665 1666 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1667 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1668 operands, rewriter); 1669 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1670 operands, rewriter); 1671 } 1672 1673 private: 1674 /// Write resulting shape and base address in descriptor, and replace rebox 1675 /// op. 1676 mlir::LogicalResult 1677 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1678 mlir::ValueRange lbounds, mlir::ValueRange extents, 1679 mlir::ValueRange strides, 1680 mlir::ConversionPatternRewriter &rewriter) const { 1681 mlir::Location loc = rebox.getLoc(); 1682 mlir::Value zero = 1683 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1684 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1685 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1686 mlir::Value extent = std::get<0>(iter.value()); 1687 unsigned dim = iter.index(); 1688 mlir::Value lb = one; 1689 if (!lbounds.empty()) { 1690 lb = lbounds[dim]; 1691 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1692 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1693 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1694 }; 1695 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1696 dest = insertExtent(rewriter, loc, dest, dim, extent); 1697 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1698 } 1699 dest = insertBaseAddress(rewriter, loc, dest, base); 1700 mlir::Value result = 1701 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1702 rewriter.replaceOp(rebox, result); 1703 return success(); 1704 } 1705 1706 // Apply slice given the base address, extents and strides of the input box. 
1707 mlir::LogicalResult 1708 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1709 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1710 mlir::ValueRange operands, 1711 mlir::ConversionPatternRewriter &rewriter) const { 1712 mlir::Location loc = rebox.getLoc(); 1713 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1714 mlir::Type idxTy = lowerTy().indexType(); 1715 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1716 // Apply subcomponent and substring shift on base address. 1717 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1718 // Cast to inputEleTy* so that a GEP can be used. 1719 mlir::Type inputEleTy = getInputEleTy(rebox); 1720 auto llvmElePtrTy = 1721 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1722 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1723 1724 if (!rebox.subcomponent().empty()) { 1725 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1726 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1727 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1728 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1729 } 1730 if (!rebox.substr().empty()) 1731 base = shiftSubstringBase(rewriter, loc, base, 1732 operands[rebox.substrOffset()]); 1733 } 1734 1735 if (rebox.slice().empty()) 1736 // The array section is of the form array[%component][substring], keep 1737 // the input array extents and strides. 1738 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1739 inputExtents, inputStrides, rewriter); 1740 1741 // Strides from the fir.box are in bytes. 1742 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1743 1744 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1745 // and strides. 1746 llvm::SmallVector<mlir::Value> slicedExtents; 1747 llvm::SmallVector<mlir::Value> slicedStrides; 1748 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1749 const bool sliceHasOrigins = !rebox.shift().empty(); 1750 unsigned sliceOps = rebox.sliceOffset(); 1751 unsigned shiftOps = rebox.shiftOffset(); 1752 auto strideOps = inputStrides.begin(); 1753 const unsigned inputRank = inputStrides.size(); 1754 for (unsigned i = 0; i < inputRank; 1755 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1756 mlir::Value sliceLb = 1757 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1758 mlir::Value inputStride = *strideOps; // already idxTy 1759 // Apply origin shift: base += (lb-shift)*input_stride 1760 mlir::Value sliceOrigin = 1761 sliceHasOrigins 1762 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1763 : one; 1764 mlir::Value diff = 1765 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1766 mlir::Value offset = 1767 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1768 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1769 // Apply upper bound and step if this is a triplet. Otherwise, the 1770 // dimension is dropped and no extents/strides are computed. 
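// Worked example (illustrative only): for a triplet i:j:k over a dimension
// whose input byte stride is S, the code below produces
//   extent = max(0, (j - i + k) / k)   and   stride = k * S,
// e.g. i=3, j=9, k=2, S=4 gives extent 4 and stride 8.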
1771 mlir::Value upper = operands[sliceOps + 1];
1772 const bool isTripletSlice =
1773 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
1774 if (isTripletSlice) {
1775 mlir::Value step =
1776 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
1777 // extent = (ub - lb + step) / step
1778 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
1779 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
1780 sliceUb, step, zero, idxTy);
1781 slicedExtents.emplace_back(extent);
1782 // stride = step*input_stride
1783 mlir::Value stride =
1784 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
1785 slicedStrides.emplace_back(stride);
1786 }
1787 }
1788 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
1789 slicedExtents, slicedStrides, rewriter);
1790 }
1791
1792 /// Apply a new shape to the data described by a box given the base address,
1793 /// extents and strides of the box.
1794 mlir::LogicalResult
1795 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
1796 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
1797 mlir::ValueRange operands,
1798 mlir::ConversionPatternRewriter &rewriter) const {
1799 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
1800 operands.begin() + rebox.shiftOffset() +
1801 rebox.shift().size()};
1802 if (rebox.shape().empty()) {
1803 // Only setting new lower bounds.
1804 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
1805 inputStrides, rewriter);
1806 }
1807
1808 mlir::Location loc = rebox.getLoc();
1809 // Strides from the fir.box are in bytes.
1810 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
1811 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
1812
1813 llvm::SmallVector<mlir::Value> newStrides;
1814 llvm::SmallVector<mlir::Value> newExtents;
1815 mlir::Type idxTy = lowerTy().indexType();
1816 // First stride from input box is kept. The rest is assumed contiguous
1817 // (it is not possible to reshape otherwise). If the input is scalar,
1818 // which may be OK if all new extents are ones, the stride does not
1819 // matter; use one.
1820 mlir::Value stride = inputStrides.empty()
1821 ? genConstantIndex(loc, idxTy, rewriter, 1)
1822 : inputStrides[0];
1823 for (unsigned i = 0; i < rebox.shape().size(); ++i) {
1824 mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
1825 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
1826 newExtents.emplace_back(extent);
1827 newStrides.emplace_back(stride);
1828 // nextStride = extent * stride;
1829 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
1830 }
1831 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
1832 rewriter);
1833 }
1834
1835 /// Return scalar element type of the input box.
1836 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
1837 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
1838 if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
1839 return seqTy.getEleTy();
1840 return ty;
1841 }
1842 };
1843
1844 /// Lower `fir.emboxproc` operation. Creates a procedure box.
1845 /// TODO: Part of supporting Fortran 2003 procedure pointers.
1846 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1847 using FIROpConversion::FIROpConversion; 1848 1849 mlir::LogicalResult 1850 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1851 mlir::ConversionPatternRewriter &rewriter) const override { 1852 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1853 return failure(); 1854 } 1855 }; 1856 1857 // Code shared between insert_value and extract_value Ops. 1858 struct ValueOpCommon { 1859 // Translate the arguments pertaining to any multidimensional array to 1860 // row-major order for LLVM-IR. 1861 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 1862 mlir::Type ty) { 1863 assert(ty && "type is null"); 1864 const auto end = attrs.size(); 1865 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1866 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1867 const auto dim = getDimension(seq); 1868 if (dim > 1) { 1869 auto ub = std::min(i + dim, end); 1870 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1871 i += dim - 1; 1872 } 1873 ty = getArrayElementType(seq); 1874 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1875 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1876 } else { 1877 llvm_unreachable("index into invalid type"); 1878 } 1879 } 1880 } 1881 1882 static llvm::SmallVector<mlir::Attribute> 1883 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1884 mlir::ArrayAttr arrAttr) { 1885 llvm::SmallVector<mlir::Attribute> attrs; 1886 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1887 if (i->isa<mlir::IntegerAttr>()) { 1888 attrs.push_back(*i); 1889 } else { 1890 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1891 ++i; 1892 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1893 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1894 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1895 } 1896 } 1897 return attrs; 1898 } 1899 1900 private: 1901 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1902 unsigned result = 1; 1903 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1904 eleTy; 1905 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1906 ++result; 1907 return result; 1908 } 1909 1910 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1911 auto eleTy = ty.getElementType(); 1912 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1913 eleTy = arrTy.getElementType(); 1914 return eleTy; 1915 } 1916 }; 1917 1918 namespace { 1919 /// Extract a subobject value from an ssa-value of aggregate type 1920 struct ExtractValueOpConversion 1921 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1922 public ValueOpCommon { 1923 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1924 1925 mlir::LogicalResult 1926 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1927 mlir::ConversionPatternRewriter &rewriter) const override { 1928 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1929 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1930 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1931 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1932 extractVal, ty, adaptor.getOperands()[0], position); 1933 return success(); 1934 } 1935 }; 1936 1937 /// InsertValue is the generalized instruction for the composition of new 1938 /// aggregate type values. 
1939 struct InsertValueOpConversion
1940 : public FIROpAndTypeConversion<fir::InsertValueOp>,
1941 public ValueOpCommon {
1942 using FIROpAndTypeConversion::FIROpAndTypeConversion;
1943
1944 mlir::LogicalResult
1945 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
1946 mlir::ConversionPatternRewriter &rewriter) const override {
1947 auto attrs = collectIndices(rewriter, insertVal.getCoor());
1948 toRowMajor(attrs, adaptor.getOperands()[0].getType());
1949 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
1950 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
1951 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
1952 position);
1953 return success();
1954 }
1955 };
1956
1957 /// InsertOnRange inserts a value into a sequence over a range of offsets.
1958 struct InsertOnRangeOpConversion
1959 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
1960 using FIROpAndTypeConversion::FIROpAndTypeConversion;
1961
1962 // Increments an array of subscripts in a row-major fashion.
1963 void incrementSubscripts(const SmallVector<uint64_t> &dims,
1964 SmallVector<uint64_t> &subscripts) const {
1965 for (size_t i = dims.size(); i > 0; --i) {
1966 if (++subscripts[i - 1] < dims[i - 1]) {
1967 return;
1968 }
1969 subscripts[i - 1] = 0;
1970 }
1971 }
1972
1973 mlir::LogicalResult
1974 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
1975 mlir::ConversionPatternRewriter &rewriter) const override {
1976
1977 llvm::SmallVector<uint64_t> dims;
1978 auto type = adaptor.getOperands()[0].getType();
1979
1980 // Iteratively extract the array dimensions from the type.
1981 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
1982 dims.push_back(t.getNumElements());
1983 type = t.getElementType();
1984 }
1985
1986 SmallVector<uint64_t> lBounds;
1987 SmallVector<uint64_t> uBounds;
1988
1989 // Unzip the upper and lower bounds and convert to a row-major format.
1990 mlir::DenseIntElementsAttr coor = range.getCoor();
1991 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
1992 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
1993 uBounds.push_back(*i++);
1994 lBounds.push_back(*i);
1995 }
1996
1997 auto &subscripts = lBounds;
1998 auto loc = range.getLoc();
1999 mlir::Value lastOp = adaptor.getOperands()[0];
2000 mlir::Value insertVal = adaptor.getOperands()[1];
2001
2002 auto i64Ty = rewriter.getI64Type();
2003 while (subscripts != uBounds) {
2004 // Convert the uint64_t subscripts to Attributes.
2005 SmallVector<mlir::Attribute> subscriptAttrs;
2006 for (const auto &subscript : subscripts)
2007 subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript));
2008 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
2009 loc, ty, lastOp, insertVal,
2010 ArrayAttr::get(range.getContext(), subscriptAttrs));
2011
2012 incrementSubscripts(dims, subscripts);
2013 }
2014
2015 // Convert the uint64_t subscripts to Attributes.
2016 SmallVector<mlir::Attribute> subscriptAttrs;
2017 for (const auto &subscript : subscripts)
2018 subscriptAttrs.push_back(
2019 IntegerAttr::get(rewriter.getI64Type(), subscript));
2020 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);
2021
2022 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
2023 range, ty, lastOp, insertVal,
2024 ArrayAttr::get(range.getContext(), arrayRef));
2025
2026 return success();
2027 }
2028 };
2029 } // namespace
2030
2031 namespace {
2032 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
2033 /// shifted etc. array.
2034 /// (See the static restriction on coordinate_of.) array_coor determines the 2035 /// coordinate (location) of a specific element. 2036 struct XArrayCoorOpConversion 2037 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2038 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2039 2040 mlir::LogicalResult 2041 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2042 mlir::ConversionPatternRewriter &rewriter) const override { 2043 auto loc = coor.getLoc(); 2044 mlir::ValueRange operands = adaptor.getOperands(); 2045 unsigned rank = coor.getRank(); 2046 assert(coor.indices().size() == rank); 2047 assert(coor.shape().empty() || coor.shape().size() == rank); 2048 assert(coor.shift().empty() || coor.shift().size() == rank); 2049 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2050 mlir::Type idxTy = lowerTy().indexType(); 2051 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2052 mlir::Value prevExt = one; 2053 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2054 mlir::Value offset = zero; 2055 const bool isShifted = !coor.shift().empty(); 2056 const bool isSliced = !coor.slice().empty(); 2057 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2058 2059 auto indexOps = coor.indices().begin(); 2060 auto shapeOps = coor.shape().begin(); 2061 auto shiftOps = coor.shift().begin(); 2062 auto sliceOps = coor.slice().begin(); 2063 // For each dimension of the array, generate the offset calculation. 2064 for (unsigned i = 0; i < rank; 2065 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2066 mlir::Value index = 2067 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2068 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2069 operands[coor.shiftOffset() + i]) 2070 : one; 2071 mlir::Value step = one; 2072 bool normalSlice = isSliced; 2073 // Compute zero based index in dimension i of the element, applying 2074 // potential triplets and lower bounds. 2075 if (isSliced) { 2076 mlir::Value ub = *(sliceOps + 1); 2077 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2078 if (normalSlice) 2079 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2080 } 2081 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2082 mlir::Value diff = 2083 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2084 if (normalSlice) { 2085 mlir::Value sliceLb = 2086 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2087 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2088 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2089 } 2090 // Update the offset given the stride and the zero based index `diff` 2091 // that was just computed. 2092 if (baseIsBoxed) { 2093 // Use stride in bytes from the descriptor. 2094 mlir::Value stride = 2095 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2096 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2097 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2098 } else { 2099 // Use stride computed at last iteration. 2100 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2101 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2102 // Compute next stride assuming contiguity of the base array 2103 // (in element number). 
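// Illustrative sketch (added, assumes a contiguous base): for extents
// (10, 20, N), prevExt takes the values 1, 10, 200 across iterations, so a
// zero-based index (i, j, k) contributes i + 10*j + 200*k elements to offset.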
2104 auto nextExt =
2105 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]);
2106 prevExt =
2107 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2108 }
2109 }
2110
2111 // Add computed offset to the base address.
2112 if (baseIsBoxed) {
2113 // Working with byte offsets. The base address is read from the fir.box
2114 // and needs to be cast to i8* to do the pointer arithmetic.
2115 mlir::Type baseTy =
2116 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
2117 mlir::Value base =
2118 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
2119 mlir::Type voidPtrTy = getVoidPtrType();
2120 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2121 llvm::SmallVector<mlir::Value> args{offset};
2122 auto addr =
2123 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2124 if (coor.subcomponent().empty()) {
2125 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
2126 return success();
2127 }
2128 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2129 args.clear();
2130 args.push_back(zero);
2131 if (!coor.lenParams().empty()) {
2132 // If type parameters are present, then we don't want to use a GEPOp
2133 // as below, as the LLVM struct type cannot be statically defined.
2134 TODO(loc, "derived type with type parameters");
2135 }
2136 // TODO: array offset subcomponents must be converted to LLVM's
2137 // row-major layout here.
2138 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2139 args.push_back(operands[i]);
2140 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
2141 args);
2142 return success();
2143 }
2144
2145 // The array was not boxed, so it must be contiguous. offset is therefore an
2146 // element offset and the base type is kept in the GEP unless the element
2147 // type size is itself dynamic.
2148 mlir::Value base;
2149 if (coor.subcomponent().empty()) {
2150 // No subcomponent.
2151 if (!coor.lenParams().empty()) {
2152 // Type parameters. Adjust element size explicitly.
2153 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2154 assert(eleTy && "result must be a reference-like type");
2155 if (fir::characterWithDynamicLen(eleTy)) {
2156 assert(coor.lenParams().size() == 1);
2157 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
2158 eleTy.cast<fir::CharacterType>().getFKind());
2159 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
2160 auto scaledBySize =
2161 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
2162 auto length =
2163 integerCast(loc, rewriter, idxTy,
2164 adaptor.getOperands()[coor.lenParamsOffset()]);
2165 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
2166 length);
2167 } else {
2168 TODO(loc, "compute size of derived type with type parameters");
2169 }
2170 }
2171 // Cast the base address to a pointer to T.
2172 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
2173 adaptor.getOperands()[0]);
2174 } else {
2175 // Operand #0 must have a pointer type. For subcomponent slicing, we
2176 // want to cast away the array type and have a plain struct type.
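// Illustrative note (assumed types, not from the original source): a base of
// LLVM type !llvm.ptr<array<10 x struct<(f32, f32)>>> is recast below to
// !llvm.ptr<struct<(f32, f32)>> so that the element offset and the field
// indices can be applied in a single GEP.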
2177 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2178 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2179 assert(ptrTy && "expected pointer type"); 2180 mlir::Type eleTy = ptrTy.getElementType(); 2181 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2182 eleTy = arrTy.getElementType(); 2183 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2184 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2185 adaptor.getOperands()[0]); 2186 } 2187 SmallVector<mlir::Value> args = {offset}; 2188 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2189 args.push_back(operands[i]); 2190 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2191 return success(); 2192 } 2193 }; 2194 } // namespace 2195 2196 /// Convert to (memory) reference to a reference to a subobject. 2197 /// The coordinate_of op is a Swiss army knife operation that can be used on 2198 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2199 /// With unboxed arrays, there is the restriction that the array have a static 2200 /// shape in all but the last column. 2201 struct CoordinateOpConversion 2202 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2203 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2204 2205 mlir::LogicalResult 2206 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2207 mlir::ConversionPatternRewriter &rewriter) const override { 2208 mlir::ValueRange operands = adaptor.getOperands(); 2209 2210 mlir::Location loc = coor.getLoc(); 2211 mlir::Value base = operands[0]; 2212 mlir::Type baseObjectTy = coor.getBaseType(); 2213 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2214 assert(objectTy && "fir.coordinate_of expects a reference type"); 2215 2216 // Complex type - basically, extract the real or imaginary part 2217 if (fir::isa_complex(objectTy)) { 2218 mlir::LLVM::ConstantOp c0 = 2219 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2220 SmallVector<mlir::Value> offs = {c0, operands[1]}; 2221 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2222 rewriter.replaceOp(coor, gep); 2223 return success(); 2224 } 2225 2226 // Boxed type - get the base pointer from the box 2227 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2228 return doRewriteBox(coor, ty, operands, loc, rewriter); 2229 2230 // Reference or pointer type 2231 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType>()) 2232 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2233 2234 return rewriter.notifyMatchFailure( 2235 coor, "fir.coordinate_of base operand has unsupported type"); 2236 } 2237 2238 unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) const { 2239 return fir::hasDynamicSize(ty) 2240 ? 
op.getDefiningOp() 2241 ->getAttrOfType<mlir::IntegerAttr>("field") 2242 .getInt() 2243 : getIntValue(op); 2244 } 2245 2246 int64_t getIntValue(mlir::Value val) const { 2247 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2248 mlir::Operation *defop = val.getDefiningOp(); 2249 2250 if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2251 return constOp.value(); 2252 if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2253 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2254 return attr.getValue().getSExtValue(); 2255 fir::emitFatalError(val.getLoc(), "must be a constant"); 2256 } 2257 2258 bool hasSubDimensions(mlir::Type type) const { 2259 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2260 } 2261 2262 /// Check whether this form of `!fir.coordinate_of` is supported. These 2263 /// additional checks are required, because we are not yet able to convert 2264 /// all valid forms of `!fir.coordinate_of`. 2265 /// TODO: Either implement the unsupported cases or extend the verifier 2266 /// in FIROps.cpp instead. 2267 bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) const { 2268 const std::size_t numOfCoors = coors.size(); 2269 std::size_t i = 0; 2270 bool subEle = false; 2271 bool ptrEle = false; 2272 for (; i < numOfCoors; ++i) { 2273 mlir::Value nxtOpnd = coors[i]; 2274 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2275 subEle = true; 2276 i += arrTy.getDimension() - 1; 2277 type = arrTy.getEleTy(); 2278 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2279 subEle = true; 2280 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2281 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2282 subEle = true; 2283 type = tupTy.getType(getIntValue(nxtOpnd)); 2284 } else { 2285 ptrEle = true; 2286 } 2287 } 2288 if (ptrEle) 2289 return (!subEle) && (numOfCoors == 1); 2290 return subEle && (i >= numOfCoors); 2291 } 2292 2293 /// Walk the abstract memory layout and determine if the path traverses any 2294 /// array types with unknown shape. Return true iff all the array types have a 2295 /// constant shape along the path. 2296 bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) const { 2297 const std::size_t sz = coors.size(); 2298 std::size_t i = 0; 2299 for (; i < sz; ++i) { 2300 mlir::Value nxtOpnd = coors[i]; 2301 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2302 if (fir::sequenceWithNonConstantShape(arrTy)) 2303 return false; 2304 i += arrTy.getDimension() - 1; 2305 type = arrTy.getEleTy(); 2306 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2307 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2308 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2309 type = strTy.getType(getIntValue(nxtOpnd)); 2310 } else { 2311 return true; 2312 } 2313 } 2314 return true; 2315 } 2316 2317 private: 2318 mlir::LogicalResult 2319 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2320 mlir::Location loc, 2321 mlir::ConversionPatternRewriter &rewriter) const { 2322 mlir::Type boxObjTy = coor.getBaseType(); 2323 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2324 2325 mlir::Value boxBaseAddr = operands[0]; 2326 2327 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2328 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>>
2329 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2330 // %addr = coordinate_of %box, %lenp
2331 if (coor.getNumOperands() == 2) {
2332 mlir::Operation *coordinateDef =
2333 (*coor.getCoor().begin()).getDefiningOp();
2334 if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) {
2335 TODO(loc,
2336 "fir.coordinate_of - fir.len_param_index is not supported yet");
2337 }
2338 }
2339
2340 // 2. GENERAL CASE:
2341 // 2.1. (`fir.array`)
2342 // %box = ... : !fir.box<!fir.array<?xU>>
2343 // %idx = ... : index
2344 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2345 // 2.2 (`fir.derived`)
2346 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2347 // %idx = ... : i32
2348 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2349 // 2.3 (`fir.derived` inside `fir.array`)
2350 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
2351 // %idx1 = ... : index   %idx2 = ... : i32
2352 // %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2353 // 2.4. TODO: Either document or disable any other case that the following
2354 // implementation might convert.
2355 mlir::LLVM::ConstantOp c0 =
2356 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2357 mlir::Value resultAddr =
2358 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
2359 boxBaseAddr, rewriter);
2360 auto currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2361 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
2362
2363 for (unsigned i = 1, last = operands.size(); i < last; ++i) {
2364 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) {
2365 if (i != 1)
2366 TODO(loc, "fir.array nested inside other array and/or derived type");
2367 // Applies byte strides from the box. Ignore lower bound from box
2368 // since fir.coordinate_of indexes are zero based. Lowering takes care
2369 // of lower bound aspects. This both accounts for dynamically sized
2370 // types and non-contiguous arrays.
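// Illustrative sketch (added note): for a two dimensional boxed array, the
// loop below computes the byte offset of zero based indices (i, j) as
//   off = i * stride(0) + j * stride(1)
// and applies it through an i8* GEP before casting back to the element type.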
2371 auto idxTy = lowerTy().indexType(); 2372 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2373 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2374 index < lastIndex; ++index) { 2375 mlir::Value stride = 2376 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2377 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2378 operands[index], stride); 2379 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2380 } 2381 auto voidPtrBase = 2382 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2383 SmallVector<mlir::Value> args{off}; 2384 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2385 voidPtrBase, args); 2386 i += arrTy.getDimension() - 1; 2387 currentObjTy = arrTy.getEleTy(); 2388 } else if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) { 2389 auto recRefTy = 2390 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2391 mlir::Value nxtOpnd = operands[i]; 2392 auto memObj = 2393 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2394 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2395 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2396 auto llvmCurrentObjTy = lowerTy().convertType(currentObjTy); 2397 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2398 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2399 args); 2400 resultAddr = 2401 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2402 } else { 2403 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2404 } 2405 } 2406 2407 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2408 return success(); 2409 } 2410 2411 mlir::LogicalResult 2412 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2413 mlir::ValueRange operands, mlir::Location loc, 2414 mlir::ConversionPatternRewriter &rewriter) const { 2415 mlir::Type baseObjectTy = coor.getBaseType(); 2416 2417 mlir::Type currentObjTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2418 bool hasSubdimension = hasSubDimensions(currentObjTy); 2419 bool columnIsDeferred = !hasSubdimension; 2420 2421 if (!supportedCoordinate(currentObjTy, operands.drop_front(1))) { 2422 TODO(loc, "unsupported combination of coordinate operands"); 2423 } 2424 2425 const bool hasKnownShape = 2426 arraysHaveKnownShape(currentObjTy, operands.drop_front(1)); 2427 2428 // If only the column is `?`, then we can simply place the column value in 2429 // the 0-th GEP position. 
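// Illustrative note (assumption, not from the original source): for a base of
// type !fir.ref<!fir.array<5x5x?xi32>> only the last (slowest varying) extent
// is unknown, so that index can be emitted as the 0-th GEP index while the
// statically sized dimensions are indexed as usual.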
2430 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 2431 if (!hasKnownShape) { 2432 const unsigned sz = arrTy.getDimension(); 2433 if (arraysHaveKnownShape(arrTy.getEleTy(), 2434 operands.drop_front(1 + sz))) { 2435 llvm::ArrayRef<int64_t> shape = arrTy.getShape(); 2436 bool allConst = true; 2437 for (unsigned i = 0; i < sz - 1; ++i) { 2438 if (shape[i] < 0) { 2439 allConst = false; 2440 break; 2441 } 2442 } 2443 if (allConst) 2444 columnIsDeferred = true; 2445 } 2446 } 2447 } 2448 2449 if (fir::hasDynamicSize(fir::unwrapSequenceType(currentObjTy))) { 2450 mlir::emitError( 2451 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2452 return failure(); 2453 } 2454 2455 if (hasKnownShape || columnIsDeferred) { 2456 SmallVector<mlir::Value> offs; 2457 if (hasKnownShape && hasSubdimension) { 2458 mlir::LLVM::ConstantOp c0 = 2459 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2460 offs.push_back(c0); 2461 } 2462 const std::size_t sz = operands.size(); 2463 Optional<int> dims; 2464 SmallVector<mlir::Value> arrIdx; 2465 for (std::size_t i = 1; i < sz; ++i) { 2466 mlir::Value nxtOpnd = operands[i]; 2467 2468 if (!currentObjTy) { 2469 mlir::emitError(loc, "invalid coordinate/check failed"); 2470 return failure(); 2471 } 2472 2473 // check if the i-th coordinate relates to an array 2474 if (dims.hasValue()) { 2475 arrIdx.push_back(nxtOpnd); 2476 int dimsLeft = *dims; 2477 if (dimsLeft > 1) { 2478 dims = dimsLeft - 1; 2479 continue; 2480 } 2481 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 2482 // append array range in reverse (FIR arrays are column-major) 2483 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2484 arrIdx.clear(); 2485 dims.reset(); 2486 continue; 2487 } 2488 if (auto arrTy = currentObjTy.dyn_cast<fir::SequenceType>()) { 2489 int d = arrTy.getDimension() - 1; 2490 if (d > 0) { 2491 dims = d; 2492 arrIdx.push_back(nxtOpnd); 2493 continue; 2494 } 2495 currentObjTy = currentObjTy.cast<fir::SequenceType>().getEleTy(); 2496 offs.push_back(nxtOpnd); 2497 continue; 2498 } 2499 2500 // check if the i-th coordinate relates to a field 2501 if (auto recTy = currentObjTy.dyn_cast<fir::RecordType>()) 2502 currentObjTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2503 else if (auto tupTy = currentObjTy.dyn_cast<mlir::TupleType>()) 2504 currentObjTy = tupTy.getType(getIntValue(nxtOpnd)); 2505 else 2506 currentObjTy = nullptr; 2507 2508 offs.push_back(nxtOpnd); 2509 } 2510 if (dims.hasValue()) 2511 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2512 mlir::Value base = operands[0]; 2513 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2514 rewriter.replaceOp(coor, retval); 2515 return success(); 2516 } 2517 2518 mlir::emitError(loc, "fir.coordinate_of base operand has unsupported type"); 2519 return failure(); 2520 } 2521 }; 2522 2523 /// Convert `fir.field_index`. The conversion depends on whether the size of 2524 /// the record is static or dynamic. 2525 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2526 using FIROpConversion::FIROpConversion; 2527 2528 // NB: most field references should be resolved by this point 2529 mlir::LogicalResult 2530 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2531 mlir::ConversionPatternRewriter &rewriter) const override { 2532 auto recTy = field.getOnType().cast<fir::RecordType>(); 2533 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2534 2535 if (!fir::hasDynamicSize(recTy)) { 2536 // Derived type has compile-time constant layout. 
Return index of the
2537 // component type in the parent type (to be used in GEP).
2538 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2539 field.getLoc(), rewriter, index)});
2540 return success();
2541 }
2542
2543 // Derived type does not have a compile-time constant layout. Call the
2544 // compiler-generated function to determine the byte offset of the field at
2545 // runtime. This returns a non-constant value.
2546 FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2547 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2548 NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2549 NamedAttribute fieldAttr = rewriter.getNamedAttr(
2550 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2551 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2552 field, lowerTy().offsetType(), adaptor.getOperands(),
2553 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2554 return success();
2555 }
2556
2557 // Reconstruct the name of the compiler-generated method that calculates the
2558 // offset
2559 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2560 llvm::StringRef field) {
2561 return recTy.getName().str() + "P." + field.str() + ".offset";
2562 }
2563 };
2564
2565 /// Convert `fir.end`
2566 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2567 using FIROpConversion::FIROpConversion;
2568
2569 mlir::LogicalResult
2570 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2571 mlir::ConversionPatternRewriter &rewriter) const override {
2572 TODO(firEnd.getLoc(), "fir.end codegen");
2573 return failure();
2574 }
2575 };
2576
2577 /// Lower `fir.gentypedesc` to a global constant.
2578 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2579 using FIROpConversion::FIROpConversion;
2580
2581 mlir::LogicalResult
2582 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2583 mlir::ConversionPatternRewriter &rewriter) const override {
2584 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2585 return failure();
2586 }
2587 };
2588
2589 /// Lower `fir.has_value` operation to `llvm.return` operation.
2590 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2591 using FIROpConversion::FIROpConversion;
2592
2593 mlir::LogicalResult
2594 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2595 mlir::ConversionPatternRewriter &rewriter) const override {
2596 rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
2597 return success();
2598 }
2599 };
2600
2601 /// Lower `fir.global` operation to `llvm.global` operation.
2602 /// `fir.insert_on_range` operations are replaced with a constant dense
2603 /// attribute when they apply to the full range.
2604 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
2605 using FIROpConversion::FIROpConversion;
2606
2607 mlir::LogicalResult
2608 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
2609 mlir::ConversionPatternRewriter &rewriter) const override {
2610 auto tyAttr = convertType(global.getType());
2611 if (global.getType().isa<fir::BoxType>())
2612 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
2613 auto loc = global.getLoc();
2614 mlir::Attribute initAttr{};
2615 if (global.getInitVal())
2616 initAttr = global.getInitVal().getValue();
2617 auto linkage = convertLinkage(global.getLinkName());
2618 auto isConst = global.getConstant().hasValue();
2619 auto g = rewriter.create<mlir::LLVM::GlobalOp>(
2620 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
2621 auto &gr = g.getInitializerRegion();
2622 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
2623 if (!gr.empty()) {
2624 // Replace insert_on_range with a constant dense attribute if the
2625 // initialization is on the full range.
2626 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
2627 for (auto insertOp : insertOnRangeOps) {
2628 if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
2629 auto seqTyAttr = convertType(insertOp.getType());
2630 auto *op = insertOp.getVal().getDefiningOp();
2631 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
2632 if (!constant) {
2633 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
2634 if (!convertOp)
2635 continue;
2636 constant = cast<mlir::arith::ConstantOp>(
2637 convertOp.getValue().getDefiningOp());
2638 }
2639 mlir::Type vecType = mlir::VectorType::get(
2640 insertOp.getType().getShape(), constant.getType());
2641 auto denseAttr = mlir::DenseElementsAttr::get(
2642 vecType.cast<ShapedType>(), constant.getValue());
2643 rewriter.setInsertionPointAfter(insertOp);
2644 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
2645 insertOp, seqTyAttr, denseAttr);
2646 }
2647 }
2648 }
2649 rewriter.eraseOp(global);
2650 return success();
2651 }
2652
2653 bool isFullRange(mlir::DenseIntElementsAttr indexes,
2654 fir::SequenceType seqTy) const {
2655 auto extents = seqTy.getShape();
2656 if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
2657 return false;
2658 auto cur_index = indexes.value_begin<int64_t>();
2659 for (unsigned i = 0; i < indexes.size(); i += 2) {
2660 if (*(cur_index++) != 0)
2661 return false;
2662 if (*(cur_index++) != extents[i / 2] - 1)
2663 return false;
2664 }
2665 return true;
2666 }
2667
2668 // TODO: String comparison should be avoided. Replace linkName with an
2669 // enumeration.
2670 mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
2671 if (optLinkage.hasValue()) {
2672 auto name = optLinkage.getValue();
2673 if (name == "internal")
2674 return mlir::LLVM::Linkage::Internal;
2675 if (name == "linkonce")
2676 return mlir::LLVM::Linkage::Linkonce;
2677 if (name == "linkonce_odr")
2678 return mlir::LLVM::Linkage::LinkonceODR;
2679 if (name == "common")
2680 return mlir::LLVM::Linkage::Common;
2681 if (name == "weak")
2682 return mlir::LLVM::Linkage::Weak;
2683 }
2684 return mlir::LLVM::Linkage::External;
2685 }
2686 };
2687
2688 /// `fir.load` --> `llvm.load`
2689 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
2690 using FIROpConversion::FIROpConversion;
2691
2692 mlir::LogicalResult
2693 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
2694 mlir::ConversionPatternRewriter &rewriter) const override {
2695 // fir.box is a special case because it is considered an SSA value in
2696 // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
2697 // and fir.box end up being the same LLVM type, and loading a
2698 // fir.ref<fir.box> is actually a no-op in LLVM.
2699 if (load.getType().isa<fir::BoxType>()) {
2700 rewriter.replaceOp(load, adaptor.getOperands()[0]);
2701 } else {
2702 mlir::Type ty = convertType(load.getType());
2703 ArrayRef<NamedAttribute> at = load->getAttrs();
2704 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
2705 load, ty, adaptor.getOperands(), at);
2706 }
2707 return success();
2708 }
2709 };
2710
2711 /// Lower `fir.no_reassoc` to LLVM IR dialect.
2712 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
2713 /// math flags?
2714 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
2715 using FIROpConversion::FIROpConversion;
2716
2717 mlir::LogicalResult
2718 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
2719 mlir::ConversionPatternRewriter &rewriter) const override {
2720 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
2721 return success();
2722 }
2723 };
2724
2725 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
2726 Optional<mlir::ValueRange> destOps,
2727 mlir::ConversionPatternRewriter &rewriter,
2728 mlir::Block *newBlock) {
2729 if (destOps.hasValue())
2730 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
2731 newBlock, mlir::ValueRange());
2732 else
2733 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
2734 }
2735
2736 template <typename A, typename B>
2737 static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
2738 mlir::ConversionPatternRewriter &rewriter) {
2739 if (destOps.hasValue())
2740 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
2741 dest);
2742 else
2743 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
2744 }
2745
2746 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
2747 mlir::Block *dest,
2748 Optional<mlir::ValueRange> destOps,
2749 mlir::ConversionPatternRewriter &rewriter) {
2750 auto *thisBlock = rewriter.getInsertionBlock();
2751 auto *newBlock = createBlock(rewriter, dest);
2752 rewriter.setInsertionPointToEnd(thisBlock);
2753 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
2754 rewriter.setInsertionPointToEnd(newBlock);
2755 }
2756
2757 /// Conversion of `fir.select_case`
2758 ///
2759 /// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches may be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise, control branches to the next block,
/// which holds the comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, control branches to a second block with the comparison for the
/// upper bound of the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0,
dest, destOps, rewriter, newBlock2); 2831 rewriter.setInsertionPointToEnd(newBlock2); 2832 continue; 2833 } 2834 assert(attr.isa<mlir::UnitAttr>()); 2835 assert((t + 1 == conds) && "unit must be last"); 2836 genBrOp(caseOp, dest, destOps, rewriter); 2837 } 2838 return success(); 2839 } 2840 }; 2841 2842 template <typename OP> 2843 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 2844 typename OP::Adaptor adaptor, 2845 mlir::ConversionPatternRewriter &rewriter) { 2846 unsigned conds = select.getNumConditions(); 2847 auto cases = select.getCases().getValue(); 2848 mlir::Value selector = adaptor.getSelector(); 2849 auto loc = select.getLoc(); 2850 assert(conds > 0 && "select must have cases"); 2851 2852 llvm::SmallVector<mlir::Block *> destinations; 2853 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 2854 mlir::Block *defaultDestination; 2855 mlir::ValueRange defaultOperands; 2856 llvm::SmallVector<int32_t> caseValues; 2857 2858 for (unsigned t = 0; t != conds; ++t) { 2859 mlir::Block *dest = select.getSuccessor(t); 2860 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 2861 const mlir::Attribute &attr = cases[t]; 2862 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 2863 destinations.push_back(dest); 2864 destinationsOperands.push_back(destOps.hasValue() ? *destOps 2865 : ValueRange()); 2866 caseValues.push_back(intAttr.getInt()); 2867 continue; 2868 } 2869 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 2870 assert((t + 1 == conds) && "unit must be last"); 2871 defaultDestination = dest; 2872 defaultOperands = destOps.hasValue() ? *destOps : ValueRange(); 2873 } 2874 2875 // LLVM::SwitchOp takes a i32 type for the selector. 2876 if (select.getSelector().getType() != rewriter.getI32Type()) 2877 selector = 2878 rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector); 2879 2880 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 2881 select, selector, 2882 /*defaultDestination=*/defaultDestination, 2883 /*defaultOperands=*/defaultOperands, 2884 /*caseValues=*/caseValues, 2885 /*caseDestinations=*/destinations, 2886 /*caseOperands=*/destinationsOperands, 2887 /*branchWeights=*/ArrayRef<int32_t>()); 2888 } 2889 2890 /// conversion of fir::SelectOp to an if-then-else ladder 2891 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 2892 using FIROpConversion::FIROpConversion; 2893 2894 mlir::LogicalResult 2895 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 2896 mlir::ConversionPatternRewriter &rewriter) const override { 2897 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 2898 return success(); 2899 } 2900 }; 2901 2902 /// conversion of fir::SelectRankOp to an if-then-else ladder 2903 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 2904 using FIROpConversion::FIROpConversion; 2905 2906 mlir::LogicalResult 2907 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 2908 mlir::ConversionPatternRewriter &rewriter) const override { 2909 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 2910 return success(); 2911 } 2912 }; 2913 2914 /// Lower `fir.select_type` to LLVM IR dialect. 
2915 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2916 using FIROpConversion::FIROpConversion; 2917 2918 mlir::LogicalResult 2919 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2920 mlir::ConversionPatternRewriter &rewriter) const override { 2921 mlir::emitError(select.getLoc(), 2922 "fir.select_type should have already been converted"); 2923 return failure(); 2924 } 2925 }; 2926 2927 /// `fir.store` --> `llvm.store` 2928 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2929 using FIROpConversion::FIROpConversion; 2930 2931 mlir::LogicalResult 2932 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2933 mlir::ConversionPatternRewriter &rewriter) const override { 2934 if (store.getValue().getType().isa<fir::BoxType>()) { 2935 // fir.box value is actually in memory, load it first before storing it. 2936 mlir::Location loc = store.getLoc(); 2937 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2938 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2939 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2940 adaptor.getOperands()[0]); 2941 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2942 store, val, adaptor.getOperands()[1]); 2943 } else { 2944 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2945 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2946 } 2947 return success(); 2948 } 2949 }; 2950 2951 namespace { 2952 2953 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2954 /// the character buffer and one for the buffer length. 2955 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2956 using FIROpConversion::FIROpConversion; 2957 2958 mlir::LogicalResult 2959 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2960 mlir::ConversionPatternRewriter &rewriter) const override { 2961 MLIRContext *ctx = unboxchar.getContext(); 2962 2963 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2964 mlir::Value tuple = adaptor.getOperands()[0]; 2965 mlir::Type tupleTy = tuple.getType(); 2966 2967 mlir::Location loc = unboxchar.getLoc(); 2968 mlir::Value ptrToBuffer = 2969 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2970 2971 mlir::LLVM::ExtractValueOp len = 2972 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2973 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2974 2975 rewriter.replaceOp(unboxchar, 2976 ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2977 return success(); 2978 } 2979 }; 2980 2981 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2982 /// components. 2983 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
2984 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 2985 using FIROpConversion::FIROpConversion; 2986 2987 mlir::LogicalResult 2988 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 2989 mlir::ConversionPatternRewriter &rewriter) const override { 2990 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 2991 return failure(); 2992 } 2993 }; 2994 2995 /// convert to LLVM IR dialect `undef` 2996 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 2997 using FIROpConversion::FIROpConversion; 2998 2999 mlir::LogicalResult 3000 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3001 mlir::ConversionPatternRewriter &rewriter) const override { 3002 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3003 undef, convertType(undef.getType())); 3004 return success(); 3005 } 3006 }; 3007 3008 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3009 using FIROpConversion::FIROpConversion; 3010 3011 mlir::LogicalResult 3012 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3013 mlir::ConversionPatternRewriter &rewriter) const override { 3014 mlir::Type ty = convertType(zero.getType()); 3015 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3016 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3017 } else if (ty.isa<mlir::IntegerType>()) { 3018 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3019 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3020 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3021 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3022 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3023 } else { 3024 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3025 return rewriter.notifyMatchFailure( 3026 zero, 3027 "conversion of fir.zero with aggregate type not implemented yet"); 3028 } 3029 return success(); 3030 } 3031 }; 3032 3033 /// `fir.unreachable` --> `llvm.unreachable` 3034 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3035 using FIROpConversion::FIROpConversion; 3036 3037 mlir::LogicalResult 3038 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3039 mlir::ConversionPatternRewriter &rewriter) const override { 3040 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3041 return success(); 3042 } 3043 }; 3044 3045 /// `fir.is_present` --> 3046 /// ``` 3047 /// %0 = llvm.mlir.constant(0 : i64) 3048 /// %1 = llvm.ptrtoint %0 3049 /// %2 = llvm.icmp "ne" %1, %0 : i64 3050 /// ``` 3051 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3052 using FIROpConversion::FIROpConversion; 3053 3054 mlir::LogicalResult 3055 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3056 mlir::ConversionPatternRewriter &rewriter) const override { 3057 mlir::Type idxTy = lowerTy().indexType(); 3058 mlir::Location loc = isPresent.getLoc(); 3059 auto ptr = adaptor.getOperands()[0]; 3060 3061 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3062 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3063 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3064 3065 mlir::Type ty = structTy.getBody()[0]; 3066 mlir::MLIRContext *ctx = isPresent.getContext(); 3067 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3068 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3069 } 3070 mlir::LLVM::ConstantOp c0 = 3071 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3072 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3073 
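    // An absent OPTIONAL argument is passed as a null pointer, so presence is
    // equivalent to the address comparing not-equal to zero.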
rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3074 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3075 3076 return success(); 3077 } 3078 }; 3079 3080 /// Create value signaling an absent optional argument in a call, e.g. 3081 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3082 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3083 using FIROpConversion::FIROpConversion; 3084 3085 mlir::LogicalResult 3086 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3087 mlir::ConversionPatternRewriter &rewriter) const override { 3088 mlir::Type ty = convertType(absent.getType()); 3089 mlir::Location loc = absent.getLoc(); 3090 3091 if (absent.getType().isa<fir::BoxCharType>()) { 3092 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3093 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3094 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3095 auto nullField = 3096 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3097 mlir::MLIRContext *ctx = absent.getContext(); 3098 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3099 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3100 absent, ty, undefStruct, nullField, c0); 3101 } else { 3102 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3103 } 3104 return success(); 3105 } 3106 }; 3107 3108 // 3109 // Primitive operations on Complex types 3110 // 3111 3112 /// Generate inline code for complex addition/subtraction 3113 template <typename LLVMOP, typename OPTY> 3114 static mlir::LLVM::InsertValueOp 3115 complexSum(OPTY sumop, mlir::ValueRange opnds, 3116 mlir::ConversionPatternRewriter &rewriter, 3117 fir::LLVMTypeConverter &lowering) { 3118 mlir::Value a = opnds[0]; 3119 mlir::Value b = opnds[1]; 3120 auto loc = sumop.getLoc(); 3121 auto ctx = sumop.getContext(); 3122 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3123 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3124 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3125 mlir::Type ty = lowering.convertType(sumop.getType()); 3126 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3127 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3128 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3129 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3130 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3131 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3132 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3133 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3134 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3135 } 3136 } // namespace 3137 3138 namespace { 3139 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3140 using FIROpConversion::FIROpConversion; 3141 3142 mlir::LogicalResult 3143 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3144 mlir::ConversionPatternRewriter &rewriter) const override { 3145 // given: (x + iy) + (x' + iy') 3146 // result: (x + x') + i(y + y') 3147 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3148 rewriter, lowerTy()); 3149 rewriter.replaceOp(addc, r.getResult()); 3150 return success(); 3151 } 3152 }; 3153 3154 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3155 using FIROpConversion::FIROpConversion; 3156 3157 mlir::LogicalResult 3158 
matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3159 mlir::ConversionPatternRewriter &rewriter) const override { 3160 // given: (x + iy) - (x' + iy') 3161 // result: (x - x') + i(y - y') 3162 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3163 rewriter, lowerTy()); 3164 rewriter.replaceOp(subc, r.getResult()); 3165 return success(); 3166 } 3167 }; 3168 3169 /// Inlined complex multiply 3170 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3171 using FIROpConversion::FIROpConversion; 3172 3173 mlir::LogicalResult 3174 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3175 mlir::ConversionPatternRewriter &rewriter) const override { 3176 // TODO: Can we use a call to __muldc3 ? 3177 // given: (x + iy) * (x' + iy') 3178 // result: (xx'-yy')+i(xy'+yx') 3179 mlir::Value a = adaptor.getOperands()[0]; 3180 mlir::Value b = adaptor.getOperands()[1]; 3181 auto loc = mulc.getLoc(); 3182 auto *ctx = mulc.getContext(); 3183 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3184 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3185 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3186 mlir::Type ty = convertType(mulc.getType()); 3187 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3188 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3189 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3190 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3191 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3192 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3193 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3194 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3195 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3196 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3197 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3198 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3199 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3200 rewriter.replaceOp(mulc, r0.getResult()); 3201 return success(); 3202 } 3203 }; 3204 3205 /// Inlined complex division 3206 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3207 using FIROpConversion::FIROpConversion; 3208 3209 mlir::LogicalResult 3210 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3211 mlir::ConversionPatternRewriter &rewriter) const override { 3212 // TODO: Can we use a call to __divdc3 instead? 3213 // Just generate inline code for now. 
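    // The formulas below follow from multiplying the numerator and the
    // denominator by the conjugate of the divisor:
    //   (x + iy) / (x' + iy') = (x + iy)(x' - iy') / (x'x' + y'y')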
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is consumed by other operations, so at this point they
/// should not have any remaining uses.
/// These operations are normally dead after the pre-codegen pass.
3277 template <typename FromOp> 3278 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3279 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3280 const fir::FIRToLLVMPassOptions &options) 3281 : FIROpConversion<FromOp>(lowering, options) {} 3282 using OpAdaptor = typename FromOp::Adaptor; 3283 3284 mlir::LogicalResult 3285 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3286 mlir::ConversionPatternRewriter &rewriter) const final { 3287 if (!op->getUses().empty()) 3288 return rewriter.notifyMatchFailure(op, "op must be dead"); 3289 rewriter.eraseOp(op); 3290 return success(); 3291 } 3292 }; 3293 3294 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3295 using MustBeDeadConversion::MustBeDeadConversion; 3296 }; 3297 3298 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3299 using MustBeDeadConversion::MustBeDeadConversion; 3300 }; 3301 3302 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3303 using MustBeDeadConversion::MustBeDeadConversion; 3304 }; 3305 3306 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3307 using MustBeDeadConversion::MustBeDeadConversion; 3308 }; 3309 3310 } // namespace 3311 3312 namespace { 3313 /// Convert FIR dialect to LLVM dialect 3314 /// 3315 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3316 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3317 /// 3318 /// This pass is not complete yet. We are upstreaming it in small patches. 3319 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3320 public: 3321 FIRToLLVMLowering() = default; 3322 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3323 mlir::ModuleOp getModule() { return getOperation(); } 3324 3325 void runOnOperation() override final { 3326 auto mod = getModule(); 3327 if (!forcedTargetTriple.empty()) { 3328 fir::setTargetTriple(mod, forcedTargetTriple); 3329 } 3330 3331 auto *context = getModule().getContext(); 3332 fir::LLVMTypeConverter typeConverter{getModule()}; 3333 mlir::RewritePatternSet pattern(context); 3334 pattern.insert< 3335 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3336 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3337 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3338 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3339 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3340 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3341 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3342 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3343 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3344 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3345 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3346 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3347 InsertValueOpConversion, IsPresentOpConversion, 3348 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3349 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3350 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3351 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3352 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3353 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3354 UndefOpConversion, UnreachableOpConversion, 
        XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter,
                                                                  options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; for those
    // that contain regions, it is legal only if the regions contain nothing
    // but the LLVM dialect.
    target.addDynamicallyLegalOp<mlir::omp::ParallelOp, mlir::omp::WsLoopOp,
                                 mlir::omp::MasterOp>([&](Operation *op) {
      return typeConverter.isLegal(&op->getRegion(0));
    });
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // Ops that require no conversion must still be marked legal so that a
    // full conversion can apply.
    target.addLegalOp<mlir::ModuleOp>();

    // Apply the patterns.
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  using Printer = fir::LLVMIRLoweringPrinter;
  LLVMIRLoweringPass(raw_ostream &output, Printer p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  raw_ostream &output;
  Printer printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
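
// Minimal usage sketch (illustrative only, not part of this file's API): the
// passes created above would typically be scheduled on a ModuleOp by the
// embedding driver, e.g.
//
//   mlir::PassManager pm(module.getContext());
//   pm.addPass(fir::createFIRToLLVMPass());
//   pm.addPass(fir::createLLVMDialectToLLVMPass(llvm::outs(), printer));
//   if (mlir::failed(pm.run(module)))
//     reportFailureToCaller();
//
// where `module`, `printer`, and `reportFailureToCaller` are supplied by the
// driver and are named here only for illustration.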