1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 27 #include "mlir/IR/BuiltinTypes.h" 28 #include "mlir/IR/Matchers.h" 29 #include "mlir/Pass/Pass.h" 30 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 31 #include "llvm/ADT/ArrayRef.h" 32 33 #define DEBUG_TYPE "flang-codegen" 34 35 using namespace mlir; 36 37 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 38 #include "TypeConverter.h" 39 40 // TODO: This should really be recovered from the specified target. 41 static constexpr unsigned defaultAlign = 8; 42 43 /// `fir.box` attribute values as defined for CFI_attribute_t in 44 /// flang/ISO_Fortran_binding.h. 
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types to
  /// the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
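  /// A sketch of the sequence this emits (the field position and result type
  /// shown are illustrative only):
  ///   %c0  = llvm.mlir.constant(0 : i32) : i32
  ///   %pos = llvm.mlir.constant(<boxValue> : i32) : i32
  ///   %p   = llvm.getelementptr %box[%c0, %pos] : (...) -> !llvm.ptr<...>
  ///   %v   = llvm.load %p : !llvm.ptr<...>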
126 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 127 mlir::Type resultTy, 128 mlir::ConversionPatternRewriter &rewriter, 129 unsigned boxValue) const { 130 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 131 mlir::LLVM::ConstantOp cValuePos = 132 genConstantOffset(loc, rewriter, boxValue); 133 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 134 auto p = rewriter.create<mlir::LLVM::GEPOp>( 135 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 136 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 137 } 138 139 /// Method to construct code sequence to get the triple for dimension `dim` 140 /// from a box. 141 SmallVector<mlir::Value, 3> 142 getDimsFromBox(mlir::Location loc, ArrayRef<mlir::Type> retTys, 143 mlir::Value box, mlir::Value dim, 144 mlir::ConversionPatternRewriter &rewriter) const { 145 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 146 mlir::LLVM::ConstantOp cDims = 147 genConstantOffset(loc, rewriter, kDimsPosInBox); 148 mlir::LLVM::LoadOp l0 = 149 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 150 mlir::LLVM::LoadOp l1 = 151 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 152 mlir::LLVM::LoadOp l2 = 153 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 154 return {l0.getResult(), l1.getResult(), l2.getResult()}; 155 } 156 157 mlir::LLVM::LoadOp 158 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 159 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 160 mlir::Type ty, 161 mlir::ConversionPatternRewriter &rewriter) const { 162 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 163 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 164 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 165 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 166 } 167 168 mlir::Value 169 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 170 mlir::ConversionPatternRewriter &rewriter) const { 171 auto idxTy = lowerTy().indexType(); 172 auto c0 = genConstantOffset(loc, rewriter, 0); 173 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 174 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 175 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 176 rewriter); 177 } 178 179 /// Read base address from a fir.box. Returned address has type ty. 
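  /// The sequence is a GEP into the base-address field of the descriptor
  /// (kAddrPosInBox; base_addr is the first member of CFI_cdesc_t) followed by
  /// a load of that field.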
180 mlir::Value 181 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 182 mlir::ConversionPatternRewriter &rewriter) const { 183 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 184 mlir::LLVM::ConstantOp cAddr = 185 genConstantOffset(loc, rewriter, kAddrPosInBox); 186 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 187 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 188 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 189 } 190 191 mlir::Value 192 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 193 mlir::ConversionPatternRewriter &rewriter) const { 194 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 195 mlir::LLVM::ConstantOp cElemLen = 196 genConstantOffset(loc, rewriter, kElemLenPosInBox); 197 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 198 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 199 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 200 } 201 202 // Get the element type given an LLVM type that is of the form 203 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 204 static mlir::Type getBoxEleTy(mlir::Type type, 205 llvm::ArrayRef<unsigned> indexes) { 206 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 207 type = t.getElementType(); 208 for (auto i : indexes) { 209 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 210 assert(!t.isOpaque() && i < t.getBody().size()); 211 type = t.getBody()[i]; 212 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 213 type = t.getElementType(); 214 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 215 type = t.getElementType(); 216 } else { 217 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 218 "request for invalid box element type"); 219 } 220 } 221 return type; 222 } 223 224 // Return LLVM type of the base address given the LLVM type 225 // of the related descriptor (lowered fir.box type). 226 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 227 return getBoxEleTy(type, {kAddrPosInBox}); 228 } 229 230 // Load the attribute from the \p box and perform a check against \p maskValue 231 // The final comparison is implemented as `(attribute & maskValue) != 0`. 232 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 233 mlir::ConversionPatternRewriter &rewriter, 234 unsigned maskValue) const { 235 mlir::Type attrTy = rewriter.getI32Type(); 236 mlir::Value attribute = 237 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 238 mlir::LLVM::ConstantOp attrMask = 239 genConstantOffset(loc, rewriter, maskValue); 240 auto maskRes = 241 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 242 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 243 return rewriter.create<mlir::LLVM::ICmpOp>( 244 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 245 } 246 247 template <typename... ARGS> 248 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 249 mlir::ConversionPatternRewriter &rewriter, 250 mlir::Value base, ARGS... 
args) const { 251 SmallVector<mlir::Value> cv{args...}; 252 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 253 } 254 255 fir::LLVMTypeConverter &lowerTy() const { 256 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 257 } 258 259 const fir::FIRToLLVMPassOptions &options; 260 }; 261 262 /// FIR conversion pattern template 263 template <typename FromOp> 264 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 265 public: 266 using FIROpConversion<FromOp>::FIROpConversion; 267 using OpAdaptor = typename FromOp::Adaptor; 268 269 mlir::LogicalResult 270 matchAndRewrite(FromOp op, OpAdaptor adaptor, 271 mlir::ConversionPatternRewriter &rewriter) const final { 272 mlir::Type ty = this->convertType(op.getType()); 273 return doRewrite(op, ty, adaptor, rewriter); 274 } 275 276 virtual mlir::LogicalResult 277 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 278 mlir::ConversionPatternRewriter &rewriter) const = 0; 279 }; 280 281 // Lower `fir.address_of` operation to `llvm.address_of` operation. 282 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 283 using FIROpConversion::FIROpConversion; 284 285 mlir::LogicalResult 286 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 287 mlir::ConversionPatternRewriter &rewriter) const override { 288 auto ty = convertType(addr.getType()); 289 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 290 addr, ty, addr.getSymbol().getRootReference().getValue()); 291 return success(); 292 } 293 }; 294 } // namespace 295 296 /// Lookup the function to compute the memory size of this parametric derived 297 /// type. The size of the object may depend on the LEN type parameters of the 298 /// derived type. 299 static mlir::LLVM::LLVMFuncOp 300 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 301 mlir::ConversionPatternRewriter &rewriter) { 302 auto module = op->getParentOfType<mlir::ModuleOp>(); 303 std::string name = recTy.getName().str() + "P.mem.size"; 304 return module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name); 305 } 306 307 namespace { 308 /// convert to LLVM IR dialect `alloca` 309 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 310 using FIROpConversion::FIROpConversion; 311 312 mlir::LogicalResult 313 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 314 mlir::ConversionPatternRewriter &rewriter) const override { 315 mlir::ValueRange operands = adaptor.getOperands(); 316 auto loc = alloc.getLoc(); 317 mlir::Type ity = lowerTy().indexType(); 318 unsigned i = 0; 319 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 320 mlir::Type ty = convertType(alloc.getType()); 321 mlir::Type resultTy = ty; 322 if (alloc.hasLenParams()) { 323 unsigned end = alloc.numLenParams(); 324 llvm::SmallVector<mlir::Value> lenParams; 325 for (; i < end; ++i) 326 lenParams.push_back(operands[i]); 327 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 328 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 329 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 330 chrTy.getContext(), chrTy.getFKind()); 331 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 332 assert(end == 1); 333 size = integerCast(loc, rewriter, ity, lenParams[0]); 334 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 335 mlir::LLVM::LLVMFuncOp memSizeFn = 336 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 337 if (!memSizeFn) 338 emitError(loc, "did not find allocation function"); 339 
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = mlir::LLVM::LLVMPointerType::get(
            mlir::IntegerType::get(alloc.getContext(), 8));
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (alloc.hasShapeOperands()) {
      mlir::Type allocEleTy = fir::unwrapRefType(alloc.getType());
      // Scale the size by constant factors encoded in the array type.
      // We only do this for arrays that don't have a constant interior, since
      // those are the only ones that get decayed to a pointer to the element
      // type.
      if (auto seqTy = allocEleTy.dyn_cast<fir::SequenceType>()) {
        if (!seqTy.hasConstantInterior()) {
          fir::SequenceType::Extent constSize = 1;
          for (auto extent : seqTy.getShape())
            if (extent != fir::SequenceType::getUnknownExtent())
              constSize *= extent;
          mlir::Value constVal{
              genConstantIndex(loc, ity, rewriter, constSize).getResult()};
          size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, constVal);
        }
      }
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
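/// A boxchar is lowered to a pair-like struct, roughly
/// `!llvm.struct<(ptr<i8>, i64)>` (the exact pointer and length types depend
/// on the character KIND and the target); the length is read from field 1 and
/// cast to the requested integer type.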
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
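/// The check simply tests whether the rank field of the descriptor is
/// non-zero.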
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return success();
  }
};

/// Lower `fir.string_lit` to an LLVM IR dialect operation.
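/// A string attribute becomes a single LLVM constant; an array of character
/// codes is materialized element by element with `llvm.insertvalue` into an
/// `undef` value of the lowered type (a summary of the pattern below).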
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return failure();
    }
    rewriter.replaceOp(constop, cst);
    return success();
  }
};

// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
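/// For example, `.EQ.` lowers roughly to (types elided):
///   %r  = llvm.fcmp "oeq" %lhs_re, %rhs_re
///   %i  = llvm.fcmp "oeq" %lhs_im, %rhs_im
///   %eq = llvm.and %r, %i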
658 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 659 using FIROpConversion::FIROpConversion; 660 661 mlir::LogicalResult 662 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 663 mlir::ConversionPatternRewriter &rewriter) const override { 664 mlir::ValueRange operands = adaptor.getOperands(); 665 mlir::MLIRContext *ctxt = cmp.getContext(); 666 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 667 mlir::Type resTy = convertType(cmp.getType()); 668 mlir::Location loc = cmp.getLoc(); 669 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 670 SmallVector<mlir::Value, 2> rp{rewriter.create<mlir::LLVM::ExtractValueOp>( 671 loc, eleTy, operands[0], pos0), 672 rewriter.create<mlir::LLVM::ExtractValueOp>( 673 loc, eleTy, operands[1], pos0)}; 674 auto rcp = 675 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 676 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 677 SmallVector<mlir::Value, 2> ip{rewriter.create<mlir::LLVM::ExtractValueOp>( 678 loc, eleTy, operands[0], pos1), 679 rewriter.create<mlir::LLVM::ExtractValueOp>( 680 loc, eleTy, operands[1], pos1)}; 681 auto icp = 682 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 683 SmallVector<mlir::Value, 2> cp{rcp, icp}; 684 switch (cmp.getPredicate()) { 685 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 686 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 687 break; 688 case mlir::arith::CmpFPredicate::UNE: // .NE. 689 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 690 break; 691 default: 692 rewriter.replaceOp(cmp, rcp.getResult()); 693 break; 694 } 695 return success(); 696 } 697 }; 698 699 /// Lower complex constants 700 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 701 using FIROpConversion::FIROpConversion; 702 703 mlir::LogicalResult 704 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 705 mlir::ConversionPatternRewriter &rewriter) const override { 706 mlir::Location loc = conc.getLoc(); 707 mlir::MLIRContext *ctx = conc.getContext(); 708 mlir::Type ty = convertType(conc.getType()); 709 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 710 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 711 auto realPart = 712 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 713 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 714 auto imPart = 715 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 716 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 717 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 718 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 719 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 720 loc, ty, undef, realPart, realIndex); 721 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 722 imPart, imIndex); 723 return success(); 724 } 725 726 inline APFloat getValue(mlir::Attribute attr) const { 727 return attr.cast<fir::RealAttr>().getValue(); 728 } 729 }; 730 731 /// convert value of from-type to value of to-type 732 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 733 using FIROpConversion::FIROpConversion; 734 735 static bool isFloatingPointTy(mlir::Type ty) { 736 return ty.isa<mlir::FloatType>(); 737 } 738 739 mlir::LogicalResult 740 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 741 mlir::ConversionPatternRewriter &rewriter) const override { 
742 auto fromFirTy = convert.getValue().getType(); 743 auto toFirTy = convert.getRes().getType(); 744 auto fromTy = convertType(fromFirTy); 745 auto toTy = convertType(toFirTy); 746 mlir::Value op0 = adaptor.getOperands()[0]; 747 if (fromTy == toTy) { 748 rewriter.replaceOp(convert, op0); 749 return success(); 750 } 751 auto loc = convert.getLoc(); 752 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 753 unsigned toBits, mlir::Type toTy) -> mlir::Value { 754 if (fromBits == toBits) { 755 // TODO: Converting between two floating-point representations with the 756 // same bitwidth is not allowed for now. 757 mlir::emitError(loc, 758 "cannot implicitly convert between two floating-point " 759 "representations of the same bitwidth"); 760 return {}; 761 } 762 if (fromBits > toBits) 763 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 764 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 765 }; 766 // Complex to complex conversion. 767 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 768 // Special case: handle the conversion of a complex such that both the 769 // real and imaginary parts are converted together. 770 auto zero = mlir::ArrayAttr::get(convert.getContext(), 771 rewriter.getI32IntegerAttr(0)); 772 auto one = mlir::ArrayAttr::get(convert.getContext(), 773 rewriter.getI32IntegerAttr(1)); 774 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 775 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 776 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 777 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 778 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 779 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 780 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 781 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 782 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 783 auto i1 = 784 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 785 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 786 ic, one); 787 return mlir::success(); 788 } 789 790 // Follow UNIX F77 convention for logicals: 791 // 1. underlying integer is not zero => logical is .TRUE. 792 // 2. logical is .TRUE. => set underlying integer to 1. 793 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 794 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 795 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 796 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 797 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 798 return mlir::success(); 799 } 800 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 801 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 802 return mlir::success(); 803 } 804 805 // Floating point to floating point conversion. 806 if (isFloatingPointTy(fromTy)) { 807 if (isFloatingPointTy(toTy)) { 808 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 809 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 810 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 811 rewriter.replaceOp(convert, v); 812 return mlir::success(); 813 } 814 if (toTy.isa<mlir::IntegerType>()) { 815 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 816 return mlir::success(); 817 } 818 } else if (fromTy.isa<mlir::IntegerType>()) { 819 // Integer to integer conversion. 
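      // Narrowing uses `llvm.trunc` and widening uses `llvm.sext`, since
      // Fortran integers are treated as signed here (a note on the cases
      // handled just below).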
820 if (toTy.isa<mlir::IntegerType>()) { 821 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 822 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 823 assert(fromBits != toBits); 824 if (fromBits > toBits) { 825 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 826 return mlir::success(); 827 } 828 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 829 return mlir::success(); 830 } 831 // Integer to floating point conversion. 832 if (isFloatingPointTy(toTy)) { 833 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 834 return mlir::success(); 835 } 836 // Integer to pointer conversion. 837 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 838 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 839 return mlir::success(); 840 } 841 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 842 // Pointer to integer conversion. 843 if (toTy.isa<mlir::IntegerType>()) { 844 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 845 return mlir::success(); 846 } 847 // Pointer to pointer conversion. 848 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 849 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 850 return mlir::success(); 851 } 852 } 853 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 854 } 855 }; 856 857 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 858 /// table. 859 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 860 using FIROpConversion::FIROpConversion; 861 862 mlir::LogicalResult 863 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 864 mlir::ConversionPatternRewriter &rewriter) const override { 865 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 866 return failure(); 867 } 868 }; 869 870 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 871 /// derived type. 872 struct DispatchTableOpConversion 873 : public FIROpConversion<fir::DispatchTableOp> { 874 using FIROpConversion::FIROpConversion; 875 876 mlir::LogicalResult 877 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 878 mlir::ConversionPatternRewriter &rewriter) const override { 879 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 880 return failure(); 881 } 882 }; 883 884 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 885 /// method-name to a function. 886 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 887 using FIROpConversion::FIROpConversion; 888 889 mlir::LogicalResult 890 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 891 mlir::ConversionPatternRewriter &rewriter) const override { 892 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 893 return failure(); 894 } 895 }; 896 897 /// Lower `fir.global_len` operation. 
898 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 899 using FIROpConversion::FIROpConversion; 900 901 mlir::LogicalResult 902 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 903 mlir::ConversionPatternRewriter &rewriter) const override { 904 TODO(globalLen.getLoc(), "fir.global_len codegen"); 905 return failure(); 906 } 907 }; 908 909 /// Lower fir.len_param_index 910 struct LenParamIndexOpConversion 911 : public FIROpConversion<fir::LenParamIndexOp> { 912 using FIROpConversion::FIROpConversion; 913 914 // FIXME: this should be specialized by the runtime target 915 mlir::LogicalResult 916 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 917 mlir::ConversionPatternRewriter &rewriter) const override { 918 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 919 } 920 }; 921 922 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 923 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 924 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 925 /// element is the length of the character buffer (`#n`). 926 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 927 using FIROpConversion::FIROpConversion; 928 929 mlir::LogicalResult 930 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 931 mlir::ConversionPatternRewriter &rewriter) const override { 932 mlir::ValueRange operands = adaptor.getOperands(); 933 MLIRContext *ctx = emboxChar.getContext(); 934 935 mlir::Value charBuffer = operands[0]; 936 mlir::Value charBufferLen = operands[1]; 937 938 mlir::Location loc = emboxChar.getLoc(); 939 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 940 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 941 942 mlir::Type lenTy = 943 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 944 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 945 946 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 947 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 948 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 949 loc, llvmStructTy, llvmStruct, charBuffer, c0); 950 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 951 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 952 953 return success(); 954 } 955 }; 956 } // namespace 957 958 /// Return the LLVMFuncOp corresponding to the standard malloc call. 959 static mlir::LLVM::LLVMFuncOp 960 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 961 auto module = op->getParentOfType<mlir::ModuleOp>(); 962 if (mlir::LLVM::LLVMFuncOp mallocFunc = 963 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 964 return mallocFunc; 965 mlir::OpBuilder moduleBuilder( 966 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 967 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 968 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 969 rewriter.getUnknownLoc(), "malloc", 970 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 971 indexType, 972 /*isVarArg=*/false)); 973 } 974 975 /// Helper function for generating the LLVM IR that computes the size 976 /// in bytes for a derived type. 
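/// This relies on the null-pointer GEP idiom; a sketch of the emitted IR:
///   %1 = llvm.getelementptr %null[1] : ... -> !llvm.ptr<...>
///   %2 = llvm.ptrtoint %1 : !llvm.ptr<...> to i64
/// (types shown are illustrative; the index type comes from the lowering).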
977 static mlir::Value 978 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 979 mlir::ConversionPatternRewriter &rewriter) { 980 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 981 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 982 llvm::SmallVector<mlir::Value> args{one}; 983 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 984 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 985 } 986 987 namespace { 988 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 989 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 990 using FIROpConversion::FIROpConversion; 991 992 mlir::LogicalResult 993 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 994 mlir::ConversionPatternRewriter &rewriter) const override { 995 auto heapTy = heap.getType(); 996 auto ty = convertType(heapTy); 997 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 998 mlir::Location loc = heap.getLoc(); 999 auto ity = lowerTy().indexType(); 1000 auto dataTy = fir::unwrapRefType(heapTy); 1001 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1002 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1003 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1004 // !fir.array<NxMx!fir.char<K,?>> sets `size` to the width of !fir.char<K>. 1005 // So multiply the constant dimensions here. 1006 if (fir::hasDynamicSize(dataTy)) 1007 if (auto seqTy = dataTy.dyn_cast<fir::SequenceType>()) 1008 if (fir::characterWithDynamicLen(seqTy.getEleTy())) { 1009 fir::SequenceType::Extent arrSize = 1; 1010 for (auto d : seqTy.getShape()) 1011 if (d != fir::SequenceType::getUnknownExtent()) 1012 arrSize *= d; 1013 size = rewriter.create<mlir::LLVM::MulOp>( 1014 loc, ity, size, genConstantIndex(loc, ity, rewriter, arrSize)); 1015 } 1016 for (mlir::Value opnd : adaptor.getOperands()) 1017 size = rewriter.create<mlir::LLVM::MulOp>( 1018 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1019 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1020 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1021 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1022 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1023 malloc.getResult(0)); 1024 return success(); 1025 } 1026 1027 // Compute the (allocation) size of the allocmem type in bytes. 1028 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1029 mlir::ConversionPatternRewriter &rewriter, 1030 mlir::Type llTy) const { 1031 // Use the primitive size, if available. 1032 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1033 if (auto size = 1034 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1035 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1036 1037 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1038 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1039 } 1040 }; 1041 } // namespace 1042 1043 /// Return the LLVMFuncOp corresponding to the standard free call. 
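/// As with `malloc` above, the declaration is created on demand when the
/// module does not already provide one.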
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return success();
  }
};
} // namespace

namespace {} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is an LLVMFuncOp.
  // 2. The first ancestor that is an LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
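  // The alloca is emitted in the entry block of the enclosing LLVM function
  // (see getFuncForAllocaInsert) so it is materialized once, at function
  // entry.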
1100 mlir::LLVM::AllocaOp 1101 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1102 mlir::ConversionPatternRewriter &rewriter) const { 1103 auto thisPt = rewriter.saveInsertionPoint(); 1104 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1105 rewriter.setInsertionPointToStart(&func.front()); 1106 auto size = this->genI32Constant(loc, rewriter, 1); 1107 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1108 rewriter.restoreInsertionPoint(thisPt); 1109 return al; 1110 } 1111 1112 static int getCFIAttr(fir::BoxType boxTy) { 1113 auto eleTy = boxTy.getEleTy(); 1114 if (eleTy.isa<fir::PointerType>()) 1115 return CFI_attribute_pointer; 1116 if (eleTy.isa<fir::HeapType>()) 1117 return CFI_attribute_allocatable; 1118 return CFI_attribute_other; 1119 } 1120 1121 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1122 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1123 .template dyn_cast<fir::RecordType>(); 1124 } 1125 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1126 auto recTy = unwrapIfDerived(boxTy); 1127 return recTy && recTy.getNumLenParams() > 0; 1128 } 1129 static bool isDerivedType(fir::BoxType boxTy) { 1130 return unwrapIfDerived(boxTy) != nullptr; 1131 } 1132 1133 // Get the element size and CFI type code of the boxed value. 1134 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1135 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1136 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1137 auto doInteger = 1138 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1139 int typeCode = fir::integerBitsToTypeCode(width); 1140 return {this->genConstantOffset(loc, rewriter, width / 8), 1141 this->genConstantOffset(loc, rewriter, typeCode)}; 1142 }; 1143 auto doLogical = 1144 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1145 int typeCode = fir::logicalBitsToTypeCode(width); 1146 return {this->genConstantOffset(loc, rewriter, width / 8), 1147 this->genConstantOffset(loc, rewriter, typeCode)}; 1148 }; 1149 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1150 int typeCode = fir::realBitsToTypeCode(width); 1151 return {this->genConstantOffset(loc, rewriter, width / 8), 1152 this->genConstantOffset(loc, rewriter, typeCode)}; 1153 }; 1154 auto doComplex = 1155 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1156 auto typeCode = fir::complexBitsToTypeCode(width); 1157 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1158 this->genConstantOffset(loc, rewriter, typeCode)}; 1159 }; 1160 auto doCharacter = 1161 [&](unsigned width, 1162 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1163 auto typeCode = fir::characterBitsToTypeCode(width); 1164 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1165 if (width == 8) 1166 return {len, typeCodeVal}; 1167 auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8); 1168 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1169 auto size = 1170 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len); 1171 return {size, typeCodeVal}; 1172 }; 1173 auto getKindMap = [&]() -> fir::KindMapping & { 1174 return this->lowerTy().getKindMap(); 1175 }; 1176 // Pointer-like types. 1177 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1178 boxEleTy = eleTy; 1179 // Integer types. 
1180 if (fir::isa_integer(boxEleTy)) { 1181 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1182 return doInteger(ty.getWidth()); 1183 auto ty = boxEleTy.cast<fir::IntegerType>(); 1184 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1185 } 1186 // Floating point types. 1187 if (fir::isa_real(boxEleTy)) { 1188 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1189 return doFloat(ty.getWidth()); 1190 auto ty = boxEleTy.cast<fir::RealType>(); 1191 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1192 } 1193 // Complex types. 1194 if (fir::isa_complex(boxEleTy)) { 1195 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1196 return doComplex( 1197 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1198 auto ty = boxEleTy.cast<fir::ComplexType>(); 1199 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1200 } 1201 // Character types. 1202 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1203 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1204 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1205 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1206 return doCharacter(charWidth, len); 1207 } 1208 assert(!lenParams.empty()); 1209 return doCharacter(charWidth, lenParams.back()); 1210 } 1211 // Logical type. 1212 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1213 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1214 // Array types. 1215 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1216 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1217 // Derived-type types. 1218 if (boxEleTy.isa<fir::RecordType>()) { 1219 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1220 this->lowerTy().convertType(boxEleTy)); 1221 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1222 auto one = 1223 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1224 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1225 mlir::ValueRange{one}); 1226 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1227 loc, this->lowerTy().indexType(), gep); 1228 return {eleSize, 1229 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1230 } 1231 // Reference type. 
1232 if (fir::isa_ref_type(boxEleTy)) { 1233 // FIXME: use the target pointer size rather than sizeof(void*) 1234 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1235 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1236 } 1237 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1238 } 1239 1240 /// Basic pattern to write a field in the descriptor 1241 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1242 mlir::Location loc, mlir::Value dest, 1243 ArrayRef<unsigned> fldIndexes, mlir::Value value, 1244 bool bitcast = false) const { 1245 auto boxTy = dest.getType(); 1246 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1247 if (bitcast) 1248 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1249 else 1250 value = this->integerCast(loc, rewriter, fldTy, value); 1251 SmallVector<mlir::Attribute, 2> attrs; 1252 for (auto i : fldIndexes) 1253 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1254 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1255 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1256 indexesAttr); 1257 } 1258 1259 inline mlir::Value 1260 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1261 mlir::Location loc, mlir::Value dest, 1262 mlir::Value base) const { 1263 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1264 /*bitCast=*/true); 1265 } 1266 1267 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1268 mlir::Location loc, mlir::Value dest, 1269 unsigned dim, mlir::Value lb) const { 1270 return insertField(rewriter, loc, dest, 1271 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1272 } 1273 1274 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1275 mlir::Location loc, mlir::Value dest, 1276 unsigned dim, mlir::Value extent) const { 1277 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1278 extent); 1279 } 1280 1281 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1282 mlir::Location loc, mlir::Value dest, 1283 unsigned dim, mlir::Value stride) const { 1284 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1285 stride); 1286 } 1287 1288 /// Get the address of the type descriptor global variable that was created by 1289 /// lowering for derived type \p recType. 1290 template <typename BOX> 1291 mlir::Value 1292 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1293 mlir::Location loc, fir::RecordType recType) const { 1294 std::string name = 1295 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1296 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1297 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1298 auto ty = mlir::LLVM::LLVMPointerType::get( 1299 this->lowerTy().convertType(global.getType())); 1300 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1301 global.getSymName()); 1302 } 1303 if (auto global = 1304 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1305 // The global may have already been translated to LLVM. 1306 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1307 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1308 global.getSymName()); 1309 } 1310 // Type info derived types do not have type descriptors since they are the 1311 // types defining type descriptors. 
1312 if (!this->options.ignoreMissingTypeDescriptors && 1313 !fir::NameUniquer::belongsToModule( 1314 name, Fortran::semantics::typeInfoBuiltinModule)) 1315 fir::emitFatalError( 1316 loc, "runtime derived type info descriptor was not generated"); 1317 return rewriter.create<mlir::LLVM::NullOp>( 1318 loc, ::getVoidPtrType(box.getContext())); 1319 } 1320 1321 template <typename BOX> 1322 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1323 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1324 unsigned rank, mlir::ValueRange lenParams) const { 1325 auto loc = box.getLoc(); 1326 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1327 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1328 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1329 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1330 mlir::Value descriptor = 1331 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1332 1333 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1334 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1335 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1336 typeparams.push_back(box.substr()[1]); 1337 } 1338 1339 // Write each of the fields with the appropriate values 1340 auto [eleSize, cfiTy] = 1341 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1342 descriptor = 1343 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1344 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1345 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1346 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1347 this->genI32Constant(loc, rewriter, rank)); 1348 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1349 descriptor = 1350 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1351 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1352 const bool hasAddendum = isDerivedType(boxTy); 1353 descriptor = 1354 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1355 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1356 1357 if (hasAddendum) { 1358 auto isArray = 1359 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1360 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1361 auto typeDesc = 1362 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1363 descriptor = 1364 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1365 /*bitCast=*/true); 1366 } 1367 1368 return {boxTy, descriptor, eleSize}; 1369 } 1370 1371 /// Compute the base address of a substring given the base address of a scalar 1372 /// string and the zero based string lower bound. 1373 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1374 mlir::Location loc, mlir::Value base, 1375 mlir::Value lowerBound) const { 1376 llvm::SmallVector<mlir::Value> gepOperands; 1377 auto baseType = 1378 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1379 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1380 auto idxTy = this->lowerTy().indexType(); 1381 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1382 gepOperands.push_back(zero); 1383 } 1384 gepOperands.push_back(lowerBound); 1385 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1386 } 1387 1388 /// If the embox is not in a globalOp body, allocate storage for the box; 1389 /// store the value inside and return the generated alloca. 
Return the input
1390 /// value otherwise.
1391 mlir::Value
1392 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter,
1393 mlir::Location loc, mlir::Value boxValue) const {
1394 auto *thisBlock = rewriter.getInsertionBlock();
1395 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp()))
1396 return boxValue;
1397 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType());
1398 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter);
1399 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca);
1400 return alloca;
1401 }
1402 };
1403
1404 /// Compute the extent of a triplet slice (lb:ub:step).
1405 static mlir::Value
1406 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
1407 mlir::Location loc, mlir::Value lb, mlir::Value ub,
1408 mlir::Value step, mlir::Value zero, mlir::Type type) {
1409 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
1410 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
1411 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
1412 // If the resulting extent is negative (`ub-lb` and `step` have different
1413 // signs), zero must be returned instead.
1414 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
1415 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
1416 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
1417 }
1418
1419 /// Create a generic box on a memory reference. This conversion lowers the
1420 /// abstract box to the appropriate, initialized descriptor.
1421 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
1422 using EmboxCommonConversion::EmboxCommonConversion;
1423
1424 mlir::LogicalResult
1425 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
1426 mlir::ConversionPatternRewriter &rewriter) const override {
1427 assert(!embox.getShape() && "There should be no dims on this embox op");
1428 auto [boxTy, dest, eleSize] =
1429 consDescriptorPrefix(embox, rewriter, /*rank=*/0,
1430 /*lenParams=*/adaptor.getOperands().drop_front(1));
1431 dest = insertBaseAddress(rewriter, embox.getLoc(), dest,
1432 adaptor.getOperands()[0]);
1433 if (isDerivedTypeWithLenParams(boxTy)) {
1434 TODO(embox.getLoc(),
1435 "fir.embox codegen of derived with length parameters");
1436 return failure();
1437 }
1438 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
1439 rewriter.replaceOp(embox, result);
1440 return success();
1441 }
1442 };
1443
1444 /// Create a generic box on a memory reference.
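/// For illustration (a schematic sketch, not exact FIR syntax): boxing a
/// ranked array section produces a descriptor whose prefix holds the base
/// address, element size, CFI version, rank, type code, attribute, and
/// addendum flag, and whose dims field holds one {lower bound, extent, byte
/// stride} triple per output dimension, as filled in by matchAndRewrite below.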
1445 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1446 using EmboxCommonConversion::EmboxCommonConversion; 1447 1448 mlir::LogicalResult 1449 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1450 mlir::ConversionPatternRewriter &rewriter) const override { 1451 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1452 xbox, rewriter, xbox.getOutRank(), 1453 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1454 // Generate the triples in the dims field of the descriptor 1455 mlir::ValueRange operands = adaptor.getOperands(); 1456 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1457 mlir::Value base = operands[0]; 1458 assert(!xbox.shape().empty() && "must have a shape"); 1459 unsigned shapeOffset = xbox.shapeOffset(); 1460 bool hasShift = !xbox.shift().empty(); 1461 unsigned shiftOffset = xbox.shiftOffset(); 1462 bool hasSlice = !xbox.slice().empty(); 1463 unsigned sliceOffset = xbox.sliceOffset(); 1464 mlir::Location loc = xbox.getLoc(); 1465 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1466 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1467 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1468 mlir::Value prevPtrOff = one; 1469 mlir::Type eleTy = boxTy.getEleTy(); 1470 const unsigned rank = xbox.getRank(); 1471 llvm::SmallVector<mlir::Value> gepArgs; 1472 unsigned constRows = 0; 1473 mlir::Value ptrOffset = zero; 1474 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1475 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1476 mlir::Type seqEleTy = seqTy.getEleTy(); 1477 // Adjust the element scaling factor if the element is a dependent type. 1478 if (fir::hasDynamicSize(seqEleTy)) { 1479 if (fir::isa_char(seqEleTy)) { 1480 assert(xbox.lenParams().size() == 1); 1481 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1482 operands[xbox.lenParamOffset()]); 1483 } else if (seqEleTy.isa<fir::RecordType>()) { 1484 TODO(loc, "generate call to calculate size of PDT"); 1485 } else { 1486 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1487 } 1488 } else { 1489 constRows = seqTy.getConstantRows(); 1490 } 1491 } 1492 1493 bool hasSubcomp = !xbox.subcomponent().empty(); 1494 if (!xbox.substr().empty()) 1495 TODO(loc, "codegen of fir.embox with substring"); 1496 1497 mlir::Value stepExpr; 1498 if (hasSubcomp) { 1499 // We have a subcomponent. The step value needs to be the number of 1500 // bytes per element (which is a derived type). 1501 mlir::Type ty0 = base.getType(); 1502 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1503 assert(ptrTy && "expected pointer type"); 1504 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1505 assert(memEleTy && "expected fir pointer type"); 1506 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1507 assert(seqTy && "expected sequence type"); 1508 mlir::Type seqEleTy = seqTy.getEleTy(); 1509 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1510 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1511 } 1512 1513 // Process the array subspace arguments (shape, shift, etc.), if any, 1514 // translating everything to values in the descriptor wherever the entity 1515 // has a dynamic array dimension. 
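// For illustration, a sketch of the simplest case (no slice, no
// subcomponent): for a contiguous array of shape (10, 20) with a 4-byte
// element type, the loop below stores extents {10, 20} and byte strides
// {4, 40}, because `prevDim` starts at the element size and, after each
// dimension is emitted, is multiplied by that dimension's extent.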
1516 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1517 mlir::Value extent = operands[shapeOffset]; 1518 mlir::Value outerExtent = extent; 1519 bool skipNext = false; 1520 if (hasSlice) { 1521 mlir::Value off = operands[sliceOffset]; 1522 mlir::Value adj = one; 1523 if (hasShift) 1524 adj = operands[shiftOffset]; 1525 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1526 if (constRows > 0) { 1527 gepArgs.push_back(ao); 1528 --constRows; 1529 } else { 1530 auto dimOff = 1531 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1532 ptrOffset = 1533 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1534 } 1535 if (mlir::isa_and_nonnull<fir::UndefOp>( 1536 xbox.slice()[3 * di + 1].getDefiningOp())) { 1537 // This dimension contains a scalar expression in the array slice op. 1538 // The dimension is loop invariant, will be dropped, and will not 1539 // appear in the descriptor. 1540 skipNext = true; 1541 } 1542 } 1543 if (!skipNext) { 1544 if (hasSlice) 1545 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1546 operands[sliceOffset + 1], 1547 operands[sliceOffset + 2], zero, i64Ty); 1548 // store lower bound (normally 0) for BIND(C) interoperability. 1549 mlir::Value lb = zero; 1550 const bool isaPointerOrAllocatable = 1551 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1552 // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and 1553 // denormalized descriptors. 1554 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) { 1555 lb = one; 1556 // If there is a shifted origin, and no fir.slice, and this is not 1557 // a normalized descriptor then use the value from the shift op as 1558 // the lower bound. 1559 if (hasShift && !(hasSlice || hasSubcomp)) { 1560 lb = operands[shiftOffset]; 1561 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1562 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1563 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1564 lb); 1565 } 1566 } 1567 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1568 1569 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1570 1571 // store step (scaled by shaped extent) 1572 1573 mlir::Value step = hasSubcomp ? stepExpr : prevDim; 1574 if (hasSlice) 1575 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1576 operands[sliceOffset + 2]); 1577 dest = insertStride(rewriter, loc, dest, descIdx, step); 1578 ++descIdx; 1579 } 1580 1581 // compute the stride and offset for the next natural dimension 1582 prevDim = 1583 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent); 1584 if (constRows == 0) 1585 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1586 outerExtent); 1587 1588 // increment iterators 1589 ++shapeOffset; 1590 if (hasShift) 1591 ++shiftOffset; 1592 if (hasSlice) 1593 sliceOffset += 3; 1594 } 1595 if (hasSlice || hasSubcomp || !xbox.substr().empty()) { 1596 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1597 args.append(gepArgs.rbegin(), gepArgs.rend()); 1598 if (hasSubcomp) { 1599 // For each field in the path add the offset to base via the args list. 1600 // In the most general case, some offsets must be computed since 1601 // they are not be known until runtime. 
1602 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1603 fir::unwrapPassByRefType(xbox.memref().getType())))) 1604 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1605 args.append(operands.begin() + xbox.subcomponentOffset(), 1606 operands.begin() + xbox.subcomponentOffset() + 1607 xbox.subcomponent().size()); 1608 } 1609 base = 1610 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1611 if (!xbox.substr().empty()) 1612 base = shiftSubstringBase(rewriter, loc, base, 1613 operands[xbox.substrOffset()]); 1614 } 1615 dest = insertBaseAddress(rewriter, loc, dest, base); 1616 if (isDerivedTypeWithLenParams(boxTy)) 1617 TODO(loc, "fir.embox codegen of derived with length parameters"); 1618 1619 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1620 rewriter.replaceOp(xbox, result); 1621 return success(); 1622 } 1623 1624 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1625 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1626 /// zero origin lower bound for interoperability with BIND(C). 1627 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1628 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1629 } 1630 }; 1631 1632 /// Create a new box given a box reference. 1633 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1634 using EmboxCommonConversion::EmboxCommonConversion; 1635 1636 mlir::LogicalResult 1637 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1638 mlir::ConversionPatternRewriter &rewriter) const override { 1639 mlir::Location loc = rebox.getLoc(); 1640 mlir::Type idxTy = lowerTy().indexType(); 1641 mlir::Value loweredBox = adaptor.getOperands()[0]; 1642 mlir::ValueRange operands = adaptor.getOperands(); 1643 1644 // Create new descriptor and fill its non-shape related data. 
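// Note (illustrative): the element size stored in the incoming box is in
// bytes, so for a character(kind=2,len=n) input the box holds 2*n; dividing
// by the character kind (which matches the per-character byte width here)
// recovers the length in characters when the kind is not 1.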
1645 llvm::SmallVector<mlir::Value, 2> lenParams; 1646 mlir::Type inputEleTy = getInputEleTy(rebox); 1647 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1648 mlir::Value len = 1649 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1650 if (charTy.getFKind() != 1) { 1651 mlir::Value width = 1652 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1653 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1654 } 1655 lenParams.emplace_back(len); 1656 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1657 if (recTy.getNumLenParams() != 0) 1658 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1659 } 1660 auto [boxTy, dest, eleSize] = 1661 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1662 1663 // Read input extents, strides, and base address 1664 llvm::SmallVector<mlir::Value> inputExtents; 1665 llvm::SmallVector<mlir::Value> inputStrides; 1666 const unsigned inputRank = rebox.getRank(); 1667 for (unsigned i = 0; i < inputRank; ++i) { 1668 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1669 SmallVector<mlir::Value, 3> dimInfo = 1670 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1671 inputExtents.emplace_back(dimInfo[1]); 1672 inputStrides.emplace_back(dimInfo[2]); 1673 } 1674 1675 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1676 mlir::Value baseAddr = 1677 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1678 1679 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1680 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1681 operands, rewriter); 1682 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1683 operands, rewriter); 1684 } 1685 1686 private: 1687 /// Write resulting shape and base address in descriptor, and replace rebox 1688 /// op. 1689 mlir::LogicalResult 1690 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1691 mlir::ValueRange lbounds, mlir::ValueRange extents, 1692 mlir::ValueRange strides, 1693 mlir::ConversionPatternRewriter &rewriter) const { 1694 mlir::Location loc = rebox.getLoc(); 1695 mlir::Value zero = 1696 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1697 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1698 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1699 mlir::Value extent = std::get<0>(iter.value()); 1700 unsigned dim = iter.index(); 1701 mlir::Value lb = one; 1702 if (!lbounds.empty()) { 1703 lb = lbounds[dim]; 1704 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1705 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1706 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1707 }; 1708 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1709 dest = insertExtent(rewriter, loc, dest, dim, extent); 1710 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1711 } 1712 dest = insertBaseAddress(rewriter, loc, dest, base); 1713 mlir::Value result = 1714 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1715 rewriter.replaceOp(rebox, result); 1716 return success(); 1717 } 1718 1719 // Apply slice given the base address, extents and strides of the input box. 
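// Illustrative example (a sketch): reboxing a(2:10:2) on a dimension with
// input byte stride S and a declared lower bound of 1 advances the base
// address by (2 - 1) * S bytes, and produces extent (10 - 2 + 2) / 2 = 5 and
// a new byte stride of 2 * S.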
1720 mlir::LogicalResult 1721 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1722 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1723 mlir::ValueRange operands, 1724 mlir::ConversionPatternRewriter &rewriter) const { 1725 mlir::Location loc = rebox.getLoc(); 1726 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1727 mlir::Type idxTy = lowerTy().indexType(); 1728 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1729 // Apply subcomponent and substring shift on base address. 1730 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1731 // Cast to inputEleTy* so that a GEP can be used. 1732 mlir::Type inputEleTy = getInputEleTy(rebox); 1733 auto llvmElePtrTy = 1734 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1735 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1736 1737 if (!rebox.subcomponent().empty()) { 1738 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1739 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1740 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1741 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1742 } 1743 if (!rebox.substr().empty()) 1744 base = shiftSubstringBase(rewriter, loc, base, 1745 operands[rebox.substrOffset()]); 1746 } 1747 1748 if (rebox.slice().empty()) 1749 // The array section is of the form array[%component][substring], keep 1750 // the input array extents and strides. 1751 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1752 inputExtents, inputStrides, rewriter); 1753 1754 // Strides from the fir.box are in bytes. 1755 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1756 1757 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1758 // and strides. 1759 llvm::SmallVector<mlir::Value> slicedExtents; 1760 llvm::SmallVector<mlir::Value> slicedStrides; 1761 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1762 const bool sliceHasOrigins = !rebox.shift().empty(); 1763 unsigned sliceOps = rebox.sliceOffset(); 1764 unsigned shiftOps = rebox.shiftOffset(); 1765 auto strideOps = inputStrides.begin(); 1766 const unsigned inputRank = inputStrides.size(); 1767 for (unsigned i = 0; i < inputRank; 1768 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1769 mlir::Value sliceLb = 1770 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1771 mlir::Value inputStride = *strideOps; // already idxTy 1772 // Apply origin shift: base += (lb-shift)*input_stride 1773 mlir::Value sliceOrigin = 1774 sliceHasOrigins 1775 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1776 : one; 1777 mlir::Value diff = 1778 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1779 mlir::Value offset = 1780 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1781 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1782 // Apply upper bound and step if this is a triplet. Otherwise, the 1783 // dimension is dropped and no extents/strides are computed. 
1784 mlir::Value upper = operands[sliceOps + 1]; 1785 const bool isTripletSlice = 1786 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1787 if (isTripletSlice) { 1788 mlir::Value step = 1789 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1790 // extent = ub-lb+step/step 1791 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1792 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1793 sliceUb, step, zero, idxTy); 1794 slicedExtents.emplace_back(extent); 1795 // stride = step*input_stride 1796 mlir::Value stride = 1797 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1798 slicedStrides.emplace_back(stride); 1799 } 1800 } 1801 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1802 slicedExtents, slicedStrides, rewriter); 1803 } 1804 1805 /// Apply a new shape to the data described by a box given the base address, 1806 /// extents and strides of the box. 1807 mlir::LogicalResult 1808 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1809 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1810 mlir::ValueRange operands, 1811 mlir::ConversionPatternRewriter &rewriter) const { 1812 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1813 operands.begin() + rebox.shiftOffset() + 1814 rebox.shift().size()}; 1815 if (rebox.shape().empty()) { 1816 // Only setting new lower bounds. 1817 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1818 inputStrides, rewriter); 1819 } 1820 1821 mlir::Location loc = rebox.getLoc(); 1822 // Strides from the fir.box are in bytes. 1823 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1824 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1825 1826 llvm::SmallVector<mlir::Value> newStrides; 1827 llvm::SmallVector<mlir::Value> newExtents; 1828 mlir::Type idxTy = lowerTy().indexType(); 1829 // First stride from input box is kept. The rest is assumed contiguous 1830 // (it is not possible to reshape otherwise). If the input is scalar, 1831 // which may be OK if all new extents are ones, the stride does not 1832 // matter, use one. 1833 mlir::Value stride = inputStrides.empty() 1834 ? genConstantIndex(loc, idxTy, rewriter, 1) 1835 : inputStrides[0]; 1836 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1837 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1838 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1839 newExtents.emplace_back(extent); 1840 newStrides.emplace_back(stride); 1841 // nextStride = extent * stride; 1842 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1843 } 1844 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1845 rewriter); 1846 } 1847 1848 /// Return scalar element type of the input box. 1849 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1850 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1851 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1852 return seqTy.getEleTy(); 1853 return ty; 1854 } 1855 }; 1856 1857 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1858 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1859 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1860 using FIROpConversion::FIROpConversion; 1861 1862 mlir::LogicalResult 1863 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1864 mlir::ConversionPatternRewriter &rewriter) const override { 1865 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1866 return failure(); 1867 } 1868 }; 1869 1870 // Code shared between insert_value and extract_value Ops. 1871 struct ValueOpCommon { 1872 // Translate the arguments pertaining to any multidimensional array to 1873 // row-major order for LLVM-IR. 1874 static void toRowMajor(SmallVectorImpl<mlir::Attribute> &attrs, 1875 mlir::Type ty) { 1876 assert(ty && "type is null"); 1877 const auto end = attrs.size(); 1878 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1879 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1880 const auto dim = getDimension(seq); 1881 if (dim > 1) { 1882 auto ub = std::min(i + dim, end); 1883 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1884 i += dim - 1; 1885 } 1886 ty = getArrayElementType(seq); 1887 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1888 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1889 } else { 1890 llvm_unreachable("index into invalid type"); 1891 } 1892 } 1893 } 1894 1895 static llvm::SmallVector<mlir::Attribute> 1896 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1897 mlir::ArrayAttr arrAttr) { 1898 llvm::SmallVector<mlir::Attribute> attrs; 1899 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1900 if (i->isa<mlir::IntegerAttr>()) { 1901 attrs.push_back(*i); 1902 } else { 1903 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1904 ++i; 1905 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1906 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1907 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1908 } 1909 } 1910 return attrs; 1911 } 1912 1913 private: 1914 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1915 unsigned result = 1; 1916 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1917 eleTy; 1918 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1919 ++result; 1920 return result; 1921 } 1922 1923 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1924 auto eleTy = ty.getElementType(); 1925 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1926 eleTy = arrTy.getElementType(); 1927 return eleTy; 1928 } 1929 }; 1930 1931 namespace { 1932 /// Extract a subobject value from an ssa-value of aggregate type 1933 struct ExtractValueOpConversion 1934 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1935 public ValueOpCommon { 1936 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1937 1938 mlir::LogicalResult 1939 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1940 mlir::ConversionPatternRewriter &rewriter) const override { 1941 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1942 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1943 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1944 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1945 extractVal, ty, adaptor.getOperands()[0], position); 1946 return success(); 1947 } 1948 }; 1949 1950 /// InsertValue is the generalized instruction for the composition of new 1951 /// aggregate type values. 
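/// As with extract_value, the coordinates are first rewritten by toRowMajor
/// above: a run of indices addressing a multidimensional LLVM array is
/// reversed, because FIR arrays are column-major while the nested LLVM array
/// type is addressed row-major. For example (a sketch), constant coordinates
/// (i, j) into a two-dimensional array become the position list [j, i].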
1952 struct InsertValueOpConversion 1953 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1954 public ValueOpCommon { 1955 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1956 1957 mlir::LogicalResult 1958 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1959 mlir::ConversionPatternRewriter &rewriter) const override { 1960 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1961 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1962 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1963 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1964 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1965 position); 1966 return success(); 1967 } 1968 }; 1969 1970 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1971 struct InsertOnRangeOpConversion 1972 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1973 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1974 1975 // Increments an array of subscripts in a row major fasion. 1976 void incrementSubscripts(const SmallVector<uint64_t> &dims, 1977 SmallVector<uint64_t> &subscripts) const { 1978 for (size_t i = dims.size(); i > 0; --i) { 1979 if (++subscripts[i - 1] < dims[i - 1]) { 1980 return; 1981 } 1982 subscripts[i - 1] = 0; 1983 } 1984 } 1985 1986 mlir::LogicalResult 1987 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 1988 mlir::ConversionPatternRewriter &rewriter) const override { 1989 1990 llvm::SmallVector<uint64_t> dims; 1991 auto type = adaptor.getOperands()[0].getType(); 1992 1993 // Iteratively extract the array dimensions from the type. 1994 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1995 dims.push_back(t.getNumElements()); 1996 type = t.getElementType(); 1997 } 1998 1999 SmallVector<uint64_t> lBounds; 2000 SmallVector<uint64_t> uBounds; 2001 2002 // Unzip the upper and lower bound and convert to a row major format. 2003 mlir::DenseIntElementsAttr coor = range.getCoor(); 2004 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2005 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2006 uBounds.push_back(*i++); 2007 lBounds.push_back(*i); 2008 } 2009 2010 auto &subscripts = lBounds; 2011 auto loc = range.getLoc(); 2012 mlir::Value lastOp = adaptor.getOperands()[0]; 2013 mlir::Value insertVal = adaptor.getOperands()[1]; 2014 2015 auto i64Ty = rewriter.getI64Type(); 2016 while (subscripts != uBounds) { 2017 // Convert uint64_t's to Attribute's. 2018 SmallVector<mlir::Attribute> subscriptAttrs; 2019 for (const auto &subscript : subscripts) 2020 subscriptAttrs.push_back(IntegerAttr::get(i64Ty, subscript)); 2021 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2022 loc, ty, lastOp, insertVal, 2023 ArrayAttr::get(range.getContext(), subscriptAttrs)); 2024 2025 incrementSubscripts(dims, subscripts); 2026 } 2027 2028 // Convert uint64_t's to Attribute's. 2029 SmallVector<mlir::Attribute> subscriptAttrs; 2030 for (const auto &subscript : subscripts) 2031 subscriptAttrs.push_back( 2032 IntegerAttr::get(rewriter.getI64Type(), subscript)); 2033 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2034 2035 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2036 range, ty, lastOp, insertVal, 2037 ArrayAttr::get(range.getContext(), arrayRef)); 2038 2039 return success(); 2040 } 2041 }; 2042 } // namespace 2043 2044 namespace { 2045 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2046 /// shifted etc. array. 
2047 /// (See the static restriction on coordinate_of.) array_coor determines the 2048 /// coordinate (location) of a specific element. 2049 struct XArrayCoorOpConversion 2050 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2051 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2052 2053 mlir::LogicalResult 2054 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2055 mlir::ConversionPatternRewriter &rewriter) const override { 2056 auto loc = coor.getLoc(); 2057 mlir::ValueRange operands = adaptor.getOperands(); 2058 unsigned rank = coor.getRank(); 2059 assert(coor.indices().size() == rank); 2060 assert(coor.shape().empty() || coor.shape().size() == rank); 2061 assert(coor.shift().empty() || coor.shift().size() == rank); 2062 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2063 mlir::Type idxTy = lowerTy().indexType(); 2064 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2065 mlir::Value prevExt = one; 2066 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2067 mlir::Value offset = zero; 2068 const bool isShifted = !coor.shift().empty(); 2069 const bool isSliced = !coor.slice().empty(); 2070 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2071 2072 auto indexOps = coor.indices().begin(); 2073 auto shapeOps = coor.shape().begin(); 2074 auto shiftOps = coor.shift().begin(); 2075 auto sliceOps = coor.slice().begin(); 2076 // For each dimension of the array, generate the offset calculation. 2077 for (unsigned i = 0; i < rank; 2078 ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) { 2079 mlir::Value index = 2080 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2081 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2082 operands[coor.shiftOffset() + i]) 2083 : one; 2084 mlir::Value step = one; 2085 bool normalSlice = isSliced; 2086 // Compute zero based index in dimension i of the element, applying 2087 // potential triplets and lower bounds. 2088 if (isSliced) { 2089 mlir::Value ub = *(sliceOps + 1); 2090 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2091 if (normalSlice) 2092 step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2)); 2093 } 2094 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2095 mlir::Value diff = 2096 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2097 if (normalSlice) { 2098 mlir::Value sliceLb = 2099 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2100 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2101 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2102 } 2103 // Update the offset given the stride and the zero based index `diff` 2104 // that was just computed. 2105 if (baseIsBoxed) { 2106 // Use stride in bytes from the descriptor. 2107 mlir::Value stride = 2108 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2109 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2110 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2111 } else { 2112 // Use stride computed at last iteration. 2113 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2114 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2115 // Compute next stride assuming contiguity of the base array 2116 // (in element number). 
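// (Schematically: for a contiguous base of shape (n1, n2) and zero-based
// indices (i1, i2), the loop accumulates offset = i1 + i2 * n1 in element
// units, and prevExt holds the running n1 * n2 * ... product; e.g. shape
// (10, 20) with indices (3, 4) gives offset 3 + 4 * 10 = 43.)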
2117 auto nextExt = 2118 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2119 prevExt = 2120 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2121 } 2122 } 2123 2124 // Add computed offset to the base address. 2125 if (baseIsBoxed) { 2126 // Working with byte offsets. The base address is read from the fir.box. 2127 // and need to be casted to i8* to do the pointer arithmetic. 2128 mlir::Type baseTy = 2129 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2130 mlir::Value base = 2131 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2132 mlir::Type voidPtrTy = getVoidPtrType(); 2133 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2134 llvm::SmallVector<mlir::Value> args{offset}; 2135 auto addr = 2136 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2137 if (coor.subcomponent().empty()) { 2138 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2139 return success(); 2140 } 2141 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2142 args.clear(); 2143 args.push_back(zero); 2144 if (!coor.lenParams().empty()) { 2145 // If type parameters are present, then we don't want to use a GEPOp 2146 // as below, as the LLVM struct type cannot be statically defined. 2147 TODO(loc, "derived type with type parameters"); 2148 } 2149 // TODO: array offset subcomponents must be converted to LLVM's 2150 // row-major layout here. 2151 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2152 args.push_back(operands[i]); 2153 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2154 args); 2155 return success(); 2156 } 2157 2158 // The array was not boxed, so it must be contiguous. offset is therefore an 2159 // element offset and the base type is kept in the GEP unless the element 2160 // type size is itself dynamic. 2161 mlir::Value base; 2162 if (coor.subcomponent().empty()) { 2163 // No subcomponent. 2164 if (!coor.lenParams().empty()) { 2165 // Type parameters. Adjust element size explicitly. 2166 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2167 assert(eleTy && "result must be a reference-like type"); 2168 if (fir::characterWithDynamicLen(eleTy)) { 2169 assert(coor.lenParams().size() == 1); 2170 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2171 eleTy.cast<fir::CharacterType>().getFKind()); 2172 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2173 auto scaledBySize = 2174 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2175 auto length = 2176 integerCast(loc, rewriter, idxTy, 2177 adaptor.getOperands()[coor.lenParamsOffset()]); 2178 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2179 length); 2180 } else { 2181 TODO(loc, "compute size of derived type with type parameters"); 2182 } 2183 } 2184 // Cast the base address to a pointer to T. 2185 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2186 adaptor.getOperands()[0]); 2187 } else { 2188 // Operand #0 must have a pointer type. For subcomponent slicing, we 2189 // want to cast away the array type and have a plain struct type. 
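// For example (a sketch, assuming the typed LLVM pointers in use here): a
// base of type !llvm.ptr<array<10 x struct<(f32, f32)>>> is re-derived below
// as !llvm.ptr<struct<(f32, f32)>> so that the following GEP can address the
// struct fields directly.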
2190 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2191 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2192 assert(ptrTy && "expected pointer type"); 2193 mlir::Type eleTy = ptrTy.getElementType(); 2194 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2195 eleTy = arrTy.getElementType(); 2196 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2197 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2198 adaptor.getOperands()[0]); 2199 } 2200 SmallVector<mlir::Value> args = {offset}; 2201 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2202 args.push_back(operands[i]); 2203 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2204 return success(); 2205 } 2206 }; 2207 } // namespace 2208 2209 /// Convert to (memory) reference to a reference to a subobject. 2210 /// The coordinate_of op is a Swiss army knife operation that can be used on 2211 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2212 /// With unboxed arrays, there is the restriction that the array have a static 2213 /// shape in all but the last column. 2214 struct CoordinateOpConversion 2215 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2216 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2217 2218 mlir::LogicalResult 2219 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2220 mlir::ConversionPatternRewriter &rewriter) const override { 2221 mlir::ValueRange operands = adaptor.getOperands(); 2222 2223 mlir::Location loc = coor.getLoc(); 2224 mlir::Value base = operands[0]; 2225 mlir::Type baseObjectTy = coor.getBaseType(); 2226 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2227 assert(objectTy && "fir.coordinate_of expects a reference type"); 2228 2229 // Complex type - basically, extract the real or imaginary part 2230 if (fir::isa_complex(objectTy)) { 2231 mlir::LLVM::ConstantOp c0 = 2232 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2233 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2234 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2235 rewriter.replaceOp(coor, gep); 2236 return success(); 2237 } 2238 2239 // Boxed type - get the base pointer from the box 2240 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2241 return doRewriteBox(coor, ty, operands, loc, rewriter); 2242 2243 // Reference, pointer or a heap type 2244 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2245 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2246 2247 return rewriter.notifyMatchFailure( 2248 coor, "fir.coordinate_of base operand has unsupported type"); 2249 } 2250 2251 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2252 return fir::hasDynamicSize(ty) 2253 ? 
op.getDefiningOp() 2254 ->getAttrOfType<mlir::IntegerAttr>("field") 2255 .getInt() 2256 : getIntValue(op); 2257 } 2258 2259 static int64_t getIntValue(mlir::Value val) { 2260 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2261 mlir::Operation *defop = val.getDefiningOp(); 2262 2263 if (auto constOp = dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2264 return constOp.value(); 2265 if (auto llConstOp = dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2266 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2267 return attr.getValue().getSExtValue(); 2268 fir::emitFatalError(val.getLoc(), "must be a constant"); 2269 } 2270 2271 static bool hasSubDimensions(mlir::Type type) { 2272 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2273 } 2274 2275 /// Check whether this form of `!fir.coordinate_of` is supported. These 2276 /// additional checks are required, because we are not yet able to convert 2277 /// all valid forms of `!fir.coordinate_of`. 2278 /// TODO: Either implement the unsupported cases or extend the verifier 2279 /// in FIROps.cpp instead. 2280 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2281 const std::size_t numOfCoors = coors.size(); 2282 std::size_t i = 0; 2283 bool subEle = false; 2284 bool ptrEle = false; 2285 for (; i < numOfCoors; ++i) { 2286 mlir::Value nxtOpnd = coors[i]; 2287 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2288 subEle = true; 2289 i += arrTy.getDimension() - 1; 2290 type = arrTy.getEleTy(); 2291 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2292 subEle = true; 2293 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2294 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2295 subEle = true; 2296 type = tupTy.getType(getIntValue(nxtOpnd)); 2297 } else { 2298 ptrEle = true; 2299 } 2300 } 2301 if (ptrEle) 2302 return (!subEle) && (numOfCoors == 1); 2303 return subEle && (i >= numOfCoors); 2304 } 2305 2306 /// Walk the abstract memory layout and determine if the path traverses any 2307 /// array types with unknown shape. Return true iff all the array types have a 2308 /// constant shape along the path. 2309 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2310 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2311 mlir::Value nxtOpnd = coors[i]; 2312 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2313 if (fir::sequenceWithNonConstantShape(arrTy)) 2314 return false; 2315 i += arrTy.getDimension() - 1; 2316 type = arrTy.getEleTy(); 2317 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2318 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2319 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2320 type = strTy.getType(getIntValue(nxtOpnd)); 2321 } else { 2322 return true; 2323 } 2324 } 2325 return true; 2326 } 2327 2328 private: 2329 mlir::LogicalResult 2330 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2331 mlir::Location loc, 2332 mlir::ConversionPatternRewriter &rewriter) const { 2333 mlir::Type boxObjTy = coor.getBaseType(); 2334 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2335 2336 mlir::Value boxBaseAddr = operands[0]; 2337 2338 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2339 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>>
2340 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2341 // %addr = coordinate_of %box, %lenp
2342 if (coor.getNumOperands() == 2) {
2343 mlir::Operation *coordinateDef =
2344 (*coor.getCoor().begin()).getDefiningOp();
2345 if (isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2346 TODO(loc,
2347 "fir.coordinate_of - fir.len_param_index is not supported yet");
2348 }
2349
2350 // 2. GENERAL CASE:
2351 // 2.1. (`fir.array`)
2352 // %box = ... : !fir.box<!fir.array<?xU>>
2353 // %idx = ... : index
2354 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2355 // 2.2 (`fir.derived`)
2356 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2357 // %idx = ... : i32
2358 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2359 // 2.3 (`fir.derived` inside `fir.array`)
2360 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
2361 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr =
2362 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2363 // 2.4. TODO: Either document or disable any other case that the following
2364 // implementation might convert.
2365 mlir::LLVM::ConstantOp c0 =
2366 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2367 mlir::Value resultAddr =
2368 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
2369 boxBaseAddr, rewriter);
2370 // Component Type
2371 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2372 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
2373
2374 for (unsigned i = 1, last = operands.size(); i < last; ++i) {
2375 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2376 if (i != 1)
2377 TODO(loc, "fir.array nested inside other array and/or derived type");
2378 // Apply byte strides from the box. Ignore the lower bound from the box
2379 // since fir.coordinate_of indexes are zero-based. Lowering takes care
2380 // of lower-bound aspects. This accounts both for dynamically sized
2381 // types and for non-contiguous arrays.
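// Schematically: for a rank-2 boxed array, the loop below forms the byte
// offset off = idx0 * byteStride0 + idx1 * byteStride1 from the strides
// stored in the descriptor and applies it to the base address with an i8*
// GEP.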
2382 auto idxTy = lowerTy().indexType(); 2383 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2384 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2385 index < lastIndex; ++index) { 2386 mlir::Value stride = 2387 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2388 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2389 operands[index], stride); 2390 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2391 } 2392 auto voidPtrBase = 2393 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2394 SmallVector<mlir::Value> args{off}; 2395 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2396 voidPtrBase, args); 2397 i += arrTy.getDimension() - 1; 2398 cpnTy = arrTy.getEleTy(); 2399 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2400 auto recRefTy = 2401 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2402 mlir::Value nxtOpnd = operands[i]; 2403 auto memObj = 2404 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2405 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2406 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2407 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2408 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2409 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2410 args); 2411 resultAddr = 2412 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2413 } else { 2414 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2415 } 2416 } 2417 2418 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2419 return success(); 2420 } 2421 2422 mlir::LogicalResult 2423 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2424 mlir::ValueRange operands, mlir::Location loc, 2425 mlir::ConversionPatternRewriter &rewriter) const { 2426 mlir::Type baseObjectTy = coor.getBaseType(); 2427 2428 // Component Type 2429 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2430 bool hasSubdimension = hasSubDimensions(cpnTy); 2431 bool columnIsDeferred = !hasSubdimension; 2432 2433 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2434 TODO(loc, "unsupported combination of coordinate operands"); 2435 2436 const bool hasKnownShape = 2437 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2438 2439 // If only the column is `?`, then we can simply place the column value in 2440 // the 0-th GEP position. 
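// For instance (a sketch): with a base of type
// !fir.ref<!fir.array<10x20x?xi32>>, every extent but the last is a
// compile-time constant, so the coordinate can still be lowered to a single
// GEP, with the index along the deferred dimension becoming the leading GEP
// operand.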
2441 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2442 if (!hasKnownShape) { 2443 const unsigned sz = arrTy.getDimension(); 2444 if (arraysHaveKnownShape(arrTy.getEleTy(), 2445 operands.drop_front(1 + sz))) { 2446 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2447 bool allConst = true; 2448 for (unsigned i = 0; i < sz - 1; ++i) { 2449 if (shape[i] < 0) { 2450 allConst = false; 2451 break; 2452 } 2453 } 2454 if (allConst) 2455 columnIsDeferred = true; 2456 } 2457 } 2458 } 2459 2460 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2461 return mlir::emitError( 2462 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2463 2464 if (hasKnownShape || columnIsDeferred) { 2465 SmallVector<mlir::Value> offs; 2466 if (hasKnownShape && hasSubdimension) { 2467 mlir::LLVM::ConstantOp c0 = 2468 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2469 offs.push_back(c0); 2470 } 2471 Optional<int> dims; 2472 SmallVector<mlir::Value> arrIdx; 2473 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2474 mlir::Value nxtOpnd = operands[i]; 2475 2476 if (!cpnTy) 2477 return mlir::emitError(loc, "invalid coordinate/check failed"); 2478 2479 // check if the i-th coordinate relates to an array 2480 if (dims.hasValue()) { 2481 arrIdx.push_back(nxtOpnd); 2482 int dimsLeft = *dims; 2483 if (dimsLeft > 1) { 2484 dims = dimsLeft - 1; 2485 continue; 2486 } 2487 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2488 // append array range in reverse (FIR arrays are column-major) 2489 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2490 arrIdx.clear(); 2491 dims.reset(); 2492 continue; 2493 } 2494 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2495 int d = arrTy.getDimension() - 1; 2496 if (d > 0) { 2497 dims = d; 2498 arrIdx.push_back(nxtOpnd); 2499 continue; 2500 } 2501 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2502 offs.push_back(nxtOpnd); 2503 continue; 2504 } 2505 2506 // check if the i-th coordinate relates to a field 2507 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2508 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2509 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2510 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2511 else 2512 cpnTy = nullptr; 2513 2514 offs.push_back(nxtOpnd); 2515 } 2516 if (dims.hasValue()) 2517 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2518 mlir::Value base = operands[0]; 2519 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2520 rewriter.replaceOp(coor, retval); 2521 return success(); 2522 } 2523 2524 return mlir::emitError( 2525 loc, "fir.coordinate_of base operand has unsupported type"); 2526 } 2527 }; 2528 2529 /// Convert `fir.field_index`. The conversion depends on whether the size of 2530 /// the record is static or dynamic. 2531 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2532 using FIROpConversion::FIROpConversion; 2533 2534 // NB: most field references should be resolved by this point 2535 mlir::LogicalResult 2536 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2537 mlir::ConversionPatternRewriter &rewriter) const override { 2538 auto recTy = field.getOnType().cast<fir::RecordType>(); 2539 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2540 2541 if (!fir::hasDynamicSize(recTy)) { 2542 // Derived type has compile-time constant layout. Return index of the 2543 // component type in the parent type (to be used in GEP). 
2544 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2545 field.getLoc(), rewriter, index)});
2546 return success();
2547 }
2548
2549 // Derived type does not have a compile-time constant layout. Call the
2550 // compiler-generated function to determine the byte offset of the field at
2551 // runtime. This returns a non-constant value.
2552 FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2553 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2554 NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2555 NamedAttribute fieldAttr = rewriter.getNamedAttr(
2556 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2557 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2558 field, lowerTy().offsetType(), adaptor.getOperands(),
2559 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2560 return success();
2561 }
2562
2563 // Reconstruct the name of the compiler-generated method that calculates the
2564 // byte offset of the field.
2565 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2566 llvm::StringRef field) {
2567 return recTy.getName().str() + "P." + field.str() + ".offset";
2568 }
2569 };
2570
2571 /// Convert `fir.end`.
2572 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2573 using FIROpConversion::FIROpConversion;
2574
2575 mlir::LogicalResult
2576 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2577 mlir::ConversionPatternRewriter &rewriter) const override {
2578 TODO(firEnd.getLoc(), "fir.end codegen");
2579 return failure();
2580 }
2581 };
2582
2583 /// Lower `fir.gentypedesc` to a global constant.
2584 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2585 using FIROpConversion::FIROpConversion;
2586
2587 mlir::LogicalResult
2588 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2589 mlir::ConversionPatternRewriter &rewriter) const override {
2590 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2591 return failure();
2592 }
2593 };
2594
2595 /// Lower `fir.has_value` operation to `llvm.return` operation.
2596 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2597 using FIROpConversion::FIROpConversion;
2598
2599 mlir::LogicalResult
2600 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2601 mlir::ConversionPatternRewriter &rewriter) const override {
2602 rewriter.replaceOpWithNewOp<LLVM::ReturnOp>(op, adaptor.getOperands());
2603 return success();
2604 }
2605 };
2606
2607 /// Lower `fir.global` operation to `llvm.global` operation.
2608 /// `fir.insert_on_range` operations are replaced with a constant dense
2609 /// attribute if they are applied on the full range.
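/// For example (a sketch): a global of type !fir.array<100xi32> whose
/// initializer region contains a single fir.insert_on_range covering indices
/// [0, 99] with the constant 0 is folded to a dense elements attribute rather
/// than lowered to a chain of llvm.insertvalue operations.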
2610 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2611 using FIROpConversion::FIROpConversion; 2612 2613 mlir::LogicalResult 2614 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2615 mlir::ConversionPatternRewriter &rewriter) const override { 2616 auto tyAttr = convertType(global.getType()); 2617 if (global.getType().isa<fir::BoxType>()) 2618 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2619 auto loc = global.getLoc(); 2620 mlir::Attribute initAttr{}; 2621 if (global.getInitVal()) 2622 initAttr = global.getInitVal().getValue(); 2623 auto linkage = convertLinkage(global.getLinkName()); 2624 auto isConst = global.getConstant().hasValue(); 2625 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2626 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2627 auto &gr = g.getInitializerRegion(); 2628 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2629 if (!gr.empty()) { 2630 // Replace insert_on_range with a constant dense attribute if the 2631 // initialization is on the full range. 2632 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2633 for (auto insertOp : insertOnRangeOps) { 2634 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2635 auto seqTyAttr = convertType(insertOp.getType()); 2636 auto *op = insertOp.getVal().getDefiningOp(); 2637 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2638 if (!constant) { 2639 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2640 if (!convertOp) 2641 continue; 2642 constant = cast<mlir::arith::ConstantOp>( 2643 convertOp.getValue().getDefiningOp()); 2644 } 2645 mlir::Type vecType = mlir::VectorType::get( 2646 insertOp.getType().getShape(), constant.getType()); 2647 auto denseAttr = mlir::DenseElementsAttr::get( 2648 vecType.cast<ShapedType>(), constant.getValue()); 2649 rewriter.setInsertionPointAfter(insertOp); 2650 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2651 insertOp, seqTyAttr, denseAttr); 2652 } 2653 } 2654 } 2655 rewriter.eraseOp(global); 2656 return success(); 2657 } 2658 2659 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2660 fir::SequenceType seqTy) const { 2661 auto extents = seqTy.getShape(); 2662 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2663 return false; 2664 auto cur_index = indexes.value_begin<int64_t>(); 2665 for (unsigned i = 0; i < indexes.size(); i += 2) { 2666 if (*(cur_index++) != 0) 2667 return false; 2668 if (*(cur_index++) != extents[i / 2] - 1) 2669 return false; 2670 } 2671 return true; 2672 } 2673 2674 // TODO: String comparaison should be avoided. Replace linkName with an 2675 // enumeration. 
  mlir::LLVM::Linkage convertLinkage(Optional<StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "linkonce_odr")
        return mlir::LLVM::Linkage::LinkonceODR;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    return mlir::LLVM::Linkage::External;
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered an SSA value in
    // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same LLVM type, and loading a
    // fir.ref<fir.box> is actually a no-op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      mlir::Type ty = convertType(load.getType());
      ArrayRef<NamedAttribute> at = load->getAttrs();
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, ty, adaptor.getOperands(), at);
    }
    return success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return success();
  }
};

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches are generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// single comparison between the selector value and the constant value of the
/// case. The block associated with the case condition is executed if the
/// comparison succeeds; otherwise control branches to the next block, which
/// holds the comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, control branches to a second block holding the comparison for
/// the upper bound of the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
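///
/// As an illustrative sketch (block names and operand handling are simplified,
/// not verbatim codegen output), a ladder for `case(:3)` followed by
/// `case(7:10)` on an i32 selector looks like:
/// ```
///   // case(:3): selector <= 3 ?
///   %0 = llvm.icmp "sle" %selector, %c3 : i32
///   llvm.cond_br %0, ^case1, ^check2lo
/// ^check2lo:  // case(7:10): 7 <= selector ?
///   %1 = llvm.icmp "sle" %c7, %selector : i32
///   llvm.cond_br %1, ^check2hi, ^default
/// ^check2hi:  // ... and selector <= 10 ?
///   %2 = llvm.icmp "sle" %selector, %c10 : i32
///   llvm.cond_br %2, ^case2, ^default
/// ```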
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.getValue().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return success();
  }
};

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : ValueRange());
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : ValueRange();
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector =
        rewriter.create<LLVM::TruncOp>(loc, rewriter.getI32Type(), selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/ArrayRef<int32_t>());
}

/// Conversion of `fir.select` to an `llvm.switch` on the selector value.
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// Conversion of `fir.select_rank` to an `llvm.switch` on the selector value.
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
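/// This operation is expected to have been rewritten before codegen, so the
/// pattern below only emits a diagnostic if it is ever reached.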
2921 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2922 using FIROpConversion::FIROpConversion; 2923 2924 mlir::LogicalResult 2925 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2926 mlir::ConversionPatternRewriter &rewriter) const override { 2927 mlir::emitError(select.getLoc(), 2928 "fir.select_type should have already been converted"); 2929 return failure(); 2930 } 2931 }; 2932 2933 /// `fir.store` --> `llvm.store` 2934 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2935 using FIROpConversion::FIROpConversion; 2936 2937 mlir::LogicalResult 2938 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2939 mlir::ConversionPatternRewriter &rewriter) const override { 2940 if (store.getValue().getType().isa<fir::BoxType>()) { 2941 // fir.box value is actually in memory, load it first before storing it. 2942 mlir::Location loc = store.getLoc(); 2943 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2944 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2945 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2946 adaptor.getOperands()[0]); 2947 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2948 store, val, adaptor.getOperands()[1]); 2949 } else { 2950 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2951 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2952 } 2953 return success(); 2954 } 2955 }; 2956 2957 namespace { 2958 2959 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2960 /// the character buffer and one for the buffer length. 2961 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2962 using FIROpConversion::FIROpConversion; 2963 2964 mlir::LogicalResult 2965 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2966 mlir::ConversionPatternRewriter &rewriter) const override { 2967 MLIRContext *ctx = unboxchar.getContext(); 2968 2969 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2970 mlir::Value tuple = adaptor.getOperands()[0]; 2971 mlir::Type tupleTy = tuple.getType(); 2972 2973 mlir::Location loc = unboxchar.getLoc(); 2974 mlir::Value ptrToBuffer = 2975 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2976 2977 mlir::LLVM::ExtractValueOp len = 2978 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2979 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2980 2981 rewriter.replaceOp(unboxchar, 2982 ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2983 return success(); 2984 } 2985 }; 2986 2987 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2988 /// components. 2989 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
2990 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 2991 using FIROpConversion::FIROpConversion; 2992 2993 mlir::LogicalResult 2994 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 2995 mlir::ConversionPatternRewriter &rewriter) const override { 2996 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 2997 return failure(); 2998 } 2999 }; 3000 3001 /// convert to LLVM IR dialect `undef` 3002 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3003 using FIROpConversion::FIROpConversion; 3004 3005 mlir::LogicalResult 3006 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3007 mlir::ConversionPatternRewriter &rewriter) const override { 3008 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3009 undef, convertType(undef.getType())); 3010 return success(); 3011 } 3012 }; 3013 3014 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3015 using FIROpConversion::FIROpConversion; 3016 3017 mlir::LogicalResult 3018 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3019 mlir::ConversionPatternRewriter &rewriter) const override { 3020 mlir::Type ty = convertType(zero.getType()); 3021 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3022 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3023 } else if (ty.isa<mlir::IntegerType>()) { 3024 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3025 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3026 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3027 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3028 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3029 } else { 3030 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3031 return rewriter.notifyMatchFailure( 3032 zero, 3033 "conversion of fir.zero with aggregate type not implemented yet"); 3034 } 3035 return success(); 3036 } 3037 }; 3038 3039 /// `fir.unreachable` --> `llvm.unreachable` 3040 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3041 using FIROpConversion::FIROpConversion; 3042 3043 mlir::LogicalResult 3044 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3045 mlir::ConversionPatternRewriter &rewriter) const override { 3046 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3047 return success(); 3048 } 3049 }; 3050 3051 /// `fir.is_present` --> 3052 /// ``` 3053 /// %0 = llvm.mlir.constant(0 : i64) 3054 /// %1 = llvm.ptrtoint %0 3055 /// %2 = llvm.icmp "ne" %1, %0 : i64 3056 /// ``` 3057 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3058 using FIROpConversion::FIROpConversion; 3059 3060 mlir::LogicalResult 3061 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3062 mlir::ConversionPatternRewriter &rewriter) const override { 3063 mlir::Type idxTy = lowerTy().indexType(); 3064 mlir::Location loc = isPresent.getLoc(); 3065 auto ptr = adaptor.getOperands()[0]; 3066 3067 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3068 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3069 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3070 3071 mlir::Type ty = structTy.getBody()[0]; 3072 mlir::MLIRContext *ctx = isPresent.getContext(); 3073 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3074 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3075 } 3076 mlir::LLVM::ConstantOp c0 = 3077 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3078 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3079 
rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3080 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3081 3082 return success(); 3083 } 3084 }; 3085 3086 /// Create value signaling an absent optional argument in a call, e.g. 3087 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3088 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3089 using FIROpConversion::FIROpConversion; 3090 3091 mlir::LogicalResult 3092 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3093 mlir::ConversionPatternRewriter &rewriter) const override { 3094 mlir::Type ty = convertType(absent.getType()); 3095 mlir::Location loc = absent.getLoc(); 3096 3097 if (absent.getType().isa<fir::BoxCharType>()) { 3098 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3099 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3100 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3101 auto nullField = 3102 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3103 mlir::MLIRContext *ctx = absent.getContext(); 3104 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3105 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3106 absent, ty, undefStruct, nullField, c0); 3107 } else { 3108 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3109 } 3110 return success(); 3111 } 3112 }; 3113 3114 // 3115 // Primitive operations on Complex types 3116 // 3117 3118 /// Generate inline code for complex addition/subtraction 3119 template <typename LLVMOP, typename OPTY> 3120 static mlir::LLVM::InsertValueOp 3121 complexSum(OPTY sumop, mlir::ValueRange opnds, 3122 mlir::ConversionPatternRewriter &rewriter, 3123 fir::LLVMTypeConverter &lowering) { 3124 mlir::Value a = opnds[0]; 3125 mlir::Value b = opnds[1]; 3126 auto loc = sumop.getLoc(); 3127 auto ctx = sumop.getContext(); 3128 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3129 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3130 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3131 mlir::Type ty = lowering.convertType(sumop.getType()); 3132 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3133 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3134 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3135 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3136 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3137 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3138 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3139 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3140 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3141 } 3142 } // namespace 3143 3144 namespace { 3145 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3146 using FIROpConversion::FIROpConversion; 3147 3148 mlir::LogicalResult 3149 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3150 mlir::ConversionPatternRewriter &rewriter) const override { 3151 // given: (x + iy) + (x' + iy') 3152 // result: (x + x') + i(y + y') 3153 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3154 rewriter, lowerTy()); 3155 rewriter.replaceOp(addc, r.getResult()); 3156 return success(); 3157 } 3158 }; 3159 3160 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3161 using FIROpConversion::FIROpConversion; 3162 3163 mlir::LogicalResult 3164 
matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3165 mlir::ConversionPatternRewriter &rewriter) const override { 3166 // given: (x + iy) - (x' + iy') 3167 // result: (x - x') + i(y - y') 3168 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3169 rewriter, lowerTy()); 3170 rewriter.replaceOp(subc, r.getResult()); 3171 return success(); 3172 } 3173 }; 3174 3175 /// Inlined complex multiply 3176 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3177 using FIROpConversion::FIROpConversion; 3178 3179 mlir::LogicalResult 3180 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3181 mlir::ConversionPatternRewriter &rewriter) const override { 3182 // TODO: Can we use a call to __muldc3 ? 3183 // given: (x + iy) * (x' + iy') 3184 // result: (xx'-yy')+i(xy'+yx') 3185 mlir::Value a = adaptor.getOperands()[0]; 3186 mlir::Value b = adaptor.getOperands()[1]; 3187 auto loc = mulc.getLoc(); 3188 auto *ctx = mulc.getContext(); 3189 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3190 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3191 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3192 mlir::Type ty = convertType(mulc.getType()); 3193 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3194 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3195 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3196 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3197 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3198 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3199 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3200 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3201 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3202 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3203 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3204 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3205 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3206 rewriter.replaceOp(mulc, r0.getResult()); 3207 return success(); 3208 } 3209 }; 3210 3211 /// Inlined complex division 3212 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3213 using FIROpConversion::FIROpConversion; 3214 3215 mlir::LogicalResult 3216 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3217 mlir::ConversionPatternRewriter &rewriter) const override { 3218 // TODO: Can we use a call to __divdc3 instead? 3219 // Just generate inline code for now. 
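    // A worked instance of the formulas spelled out just below (illustrative
    // only): (1 + 2i) / (3 + 4i) = ((1*3 + 2*4) + i(2*3 - 1*4)) / (3*3 + 4*4)
    //                            = (11 + 2i) / 25 = 0.44 + 0.08i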
    // given: (x + iy) / (x' + iy')
    // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is consumed by other operations, so they should have no
/// remaining uses at this point.
/// These operations are normally dead after the pre-codegen pass.
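/// For example (illustrative), a `fir.shape` whose extents were already folded
/// into the extended codegen operations created by the pre-codegen rewrite has
/// no remaining uses and is simply erased by the patterns below.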
3283 template <typename FromOp> 3284 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3285 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3286 const fir::FIRToLLVMPassOptions &options) 3287 : FIROpConversion<FromOp>(lowering, options) {} 3288 using OpAdaptor = typename FromOp::Adaptor; 3289 3290 mlir::LogicalResult 3291 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3292 mlir::ConversionPatternRewriter &rewriter) const final { 3293 if (!op->getUses().empty()) 3294 return rewriter.notifyMatchFailure(op, "op must be dead"); 3295 rewriter.eraseOp(op); 3296 return success(); 3297 } 3298 }; 3299 3300 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3301 using MustBeDeadConversion::MustBeDeadConversion; 3302 }; 3303 3304 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3305 using MustBeDeadConversion::MustBeDeadConversion; 3306 }; 3307 3308 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3309 using MustBeDeadConversion::MustBeDeadConversion; 3310 }; 3311 3312 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3313 using MustBeDeadConversion::MustBeDeadConversion; 3314 }; 3315 3316 } // namespace 3317 3318 namespace { 3319 /// Convert FIR dialect to LLVM dialect 3320 /// 3321 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3322 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3323 /// 3324 /// This pass is not complete yet. We are upstreaming it in small patches. 3325 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3326 public: 3327 FIRToLLVMLowering() = default; 3328 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3329 mlir::ModuleOp getModule() { return getOperation(); } 3330 3331 void runOnOperation() override final { 3332 auto mod = getModule(); 3333 if (!forcedTargetTriple.empty()) { 3334 fir::setTargetTriple(mod, forcedTargetTriple); 3335 } 3336 3337 auto *context = getModule().getContext(); 3338 fir::LLVMTypeConverter typeConverter{getModule()}; 3339 mlir::RewritePatternSet pattern(context); 3340 pattern.insert< 3341 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3342 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3343 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3344 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3345 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3346 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3347 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3348 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3349 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3350 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3351 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3352 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3353 InsertValueOpConversion, IsPresentOpConversion, 3354 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3355 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3356 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3357 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3358 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3359 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3360 UndefOpConversion, UnreachableOpConversion, 
        XArrayCoorOpConversion, XEmboxOpConversion, XReboxOpConversion,
        ZeroOpConversion>(typeConverter, options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; for those
    // that contain regions, it is legal only if the regions contain nothing
    // but the LLVM dialect.
    target.addDynamicallyLegalOp<mlir::omp::ParallelOp, mlir::omp::WsLoopOp,
                                 mlir::omp::MasterOp>([&](Operation *op) {
      return typeConverter.isLegal(&op->getRegion(0));
    });
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // Required NOPs for applying a full conversion.
    target.addLegalOp<mlir::ModuleOp>();

    // Apply the patterns.
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  using Printer = fir::LLVMIRLoweringPrinter;
  LLVMIRLoweringPass(raw_ostream &output, Printer p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  raw_ostream &output;
  Printer printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
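// Example driver usage (illustrative sketch only; pass-manager setup details
// depend on the embedding tool):
//
//   mlir::PassManager pm(module.getContext());
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     return mlir::failure();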