1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/MathToLLVM/MathToLLVM.h" 27 #include "mlir/Conversion/MathToLibm/MathToLibm.h" 28 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 29 #include "mlir/IR/BuiltinTypes.h" 30 #include "mlir/IR/Matchers.h" 31 #include "mlir/Pass/Pass.h" 32 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 33 #include "llvm/ADT/ArrayRef.h" 34 35 #define DEBUG_TYPE "flang-codegen" 36 37 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 38 #include "TypeConverter.h" 39 40 // TODO: This should really be recovered from the specified target. 41 static constexpr unsigned defaultAlign = 8; 42 43 /// `fir.box` attribute values as defined for CFI_attribute_t in 44 /// flang/ISO_Fortran_binding.h. 
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

/// Return the LLVM dialect i8* type, used as the generic "void*" by this
/// lowering.
static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

/// Create an llvm.constant of type \p ity holding the index value \p offset,
/// materialized from an i64 integer attribute.
static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

/// Create a new block in the region containing \p insertBefore, placed
/// immediately before it.
static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template. Shared base class of all FIR-to-LLVM
/// conversion patterns in this file; provides helpers to build small integer
/// constants and to read fields out of a lowered `fir.box` descriptor.
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  /// Convert a FIR type to its LLVM dialect equivalent via the pass's type
  /// converter.
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  /// Member counterpart of the file-level getVoidPtrType(): i8* in this
  /// pattern's MLIR context.
  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  /// Build an i32 llvm.constant holding \p value.
  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  /// Build an llvm.constant of the type converter's offset type holding
  /// \p offset. Used primarily as a GEP index below.
  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures. Extension is signed (SExtOp).
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence (GEP + load) to extract the specific value at
  /// field position \p boxValue from a `fir.box`.
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box. The three loads correspond to the descriptor's per-dimension
  /// entries at offsets 0, 1, and 2.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  /// GEP into \p a with indices {c0, cDims, dim, off} and load a value of
  /// type \p ty from the resulting address.
  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  /// Read the stride of dimension \p dim from a fir.box descriptor.
  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  /// Read the element length field (byte size of one element) from a fir.box.
  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  // Emits a fatal error if an index does not land on a composite type.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against \p maskValue
  // The final comparison is implemented as `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  /// Build an llvm.getelementptr of result type \p ty into \p base using the
  /// given index values.
  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv = {args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  /// Access the pass's type converter as the FIR-specific subclass.
  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  // Pass-wide lowering options shared by all patterns.
  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template. Variant of FIROpConversion that converts
/// the op's result type up front and hands it to a `doRewrite` hook.
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  /// Hook implemented by subclasses; \p ty is the already-converted result
  /// type of \p addr.
  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};
} // namespace

namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  // The size function is found by name: "<record name>P.mem.size".
  std::string name = recTy.getName().str() + "P.mem.size";
  if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
    return memSizeFunc;
  // No fallback path yet: missing size functions hit the TODO below.
  TODO(op.getLoc(), "did not find allocation function");
}

// Compute the alloc scale size (constant factors encoded in the array type).
// We do this for arrays without a constant interior or arrays of character with
// dynamic length arrays, since those are the only ones that get decayed to a
// pointer to the element type.
template <typename OP>
static mlir::Value
genAllocationScaleSize(OP op, mlir::Type ity,
                       mlir::ConversionPatternRewriter &rewriter) {
  mlir::Location loc = op.getLoc();
  mlir::Type dataTy = op.getInType();
  mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
  auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
  if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
      (seqTy && fir::characterWithDynamicLen(scalarType))) {
    // Multiply all known (compile-time constant) extents together; unknown
    // extents are supplied by the op's shape operands instead.
    fir::SequenceType::Extent constSize = 1;
    for (auto extent : seqTy.getShape())
      if (extent != fir::SequenceType::getUnknownExtent())
        constSize *= extent;
    if (constSize != 1) {
      mlir::Value constVal{
          genConstantIndex(loc, ity, rewriter, constSize).getResult()};
      return constVal;
    }
  }
  // Null signals "no scaling required" to the caller.
  return nullptr;
}

namespace {
/// convert to LLVM IR dialect `alloca`
/// The allocation size starts at 1 and is scaled by LEN parameters (for
/// characters / parametric derived types), constant array factors, and any
/// runtime shape operands.
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    // `i` walks the operand list: first the LEN parameters (if any), then the
    // shape operands. It must not be reset between the two loops.
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        // Dynamic-length character: allocate raw characters and scale the
        // size by the single LEN parameter.
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        // Parametric derived type: the byte size is computed by a dedicated
        // runtime/size function taking the LEN parameters.
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        // NOTE(review): getDependentTypeMemSizeFn either returns a function
        // or hits TODO, so this check appears defensive/unreachable.
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        // Allocate as raw bytes (i8*) since the true size is dynamic.
        ty = ::getVoidPtrType(alloc.getContext());
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    if (alloc.hasShapeOperands()) {
      // Multiply in each runtime extent (remaining operands after the LEN
      // parameters consumed above).
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      // A true descriptor: load the base address field.
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      // Otherwise (presumably a boxchar-like struct — confirm with op
      // verifier) the address is the struct's first element.
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
440 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 441 using FIROpConversion::FIROpConversion; 442 443 mlir::LogicalResult 444 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 445 mlir::ConversionPatternRewriter &rewriter) const override { 446 mlir::Value boxChar = adaptor.getOperands()[0]; 447 mlir::Location loc = boxChar.getLoc(); 448 mlir::MLIRContext *ctx = boxChar.getContext(); 449 mlir::Type returnValTy = boxCharLen.getResult().getType(); 450 451 constexpr int boxcharLenIdx = 1; 452 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 453 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 454 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 455 rewriter.replaceOp(boxCharLen, lenAfterCast); 456 457 return mlir::success(); 458 } 459 }; 460 461 /// Lower `fir.box_dims` to a sequence of operations to extract the requested 462 /// dimension infomartion from the boxed value. 463 /// Result in a triple set of GEPs and loads. 464 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> { 465 using FIROpConversion::FIROpConversion; 466 467 mlir::LogicalResult 468 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor, 469 mlir::ConversionPatternRewriter &rewriter) const override { 470 llvm::SmallVector<mlir::Type, 3> resultTypes = { 471 convertType(boxdims.getResult(0).getType()), 472 convertType(boxdims.getResult(1).getType()), 473 convertType(boxdims.getResult(2).getType()), 474 }; 475 auto results = 476 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0], 477 adaptor.getOperands()[1], rewriter); 478 rewriter.replaceOp(boxdims, results); 479 return mlir::success(); 480 } 481 }; 482 483 /// Lower `fir.box_elesize` to a sequence of operations ro extract the size of 484 /// an element in the boxed value. 
485 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> { 486 using FIROpConversion::FIROpConversion; 487 488 mlir::LogicalResult 489 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor, 490 mlir::ConversionPatternRewriter &rewriter) const override { 491 mlir::Value a = adaptor.getOperands()[0]; 492 auto loc = boxelesz.getLoc(); 493 auto ty = convertType(boxelesz.getType()); 494 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox); 495 rewriter.replaceOp(boxelesz, elemSize); 496 return mlir::success(); 497 } 498 }; 499 500 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the 501 /// boxed value was from an ALLOCATABLE entity. 502 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> { 503 using FIROpConversion::FIROpConversion; 504 505 mlir::LogicalResult 506 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor, 507 mlir::ConversionPatternRewriter &rewriter) const override { 508 mlir::Value box = adaptor.getOperands()[0]; 509 auto loc = boxisalloc.getLoc(); 510 mlir::Value check = 511 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable); 512 rewriter.replaceOp(boxisalloc, check); 513 return mlir::success(); 514 } 515 }; 516 517 /// Lower `fir.box_isarray` to a sequence of operations to determine if the 518 /// boxed is an array. 
519 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> { 520 using FIROpConversion::FIROpConversion; 521 522 mlir::LogicalResult 523 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor, 524 mlir::ConversionPatternRewriter &rewriter) const override { 525 mlir::Value a = adaptor.getOperands()[0]; 526 auto loc = boxisarray.getLoc(); 527 auto rank = 528 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox); 529 auto c0 = genConstantOffset(loc, rewriter, 0); 530 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 531 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0); 532 return mlir::success(); 533 } 534 }; 535 536 /// Lower `fir.box_isptr` to a sequence of operations to determined if the 537 /// boxed value was from a POINTER entity. 538 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> { 539 using FIROpConversion::FIROpConversion; 540 541 mlir::LogicalResult 542 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor, 543 mlir::ConversionPatternRewriter &rewriter) const override { 544 mlir::Value box = adaptor.getOperands()[0]; 545 auto loc = boxisptr.getLoc(); 546 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer); 547 rewriter.replaceOp(boxisptr, check); 548 return mlir::success(); 549 } 550 }; 551 552 /// Lower `fir.box_rank` to the sequence of operation to extract the rank from 553 /// the box. 
554 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> { 555 using FIROpConversion::FIROpConversion; 556 557 mlir::LogicalResult 558 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor, 559 mlir::ConversionPatternRewriter &rewriter) const override { 560 mlir::Value a = adaptor.getOperands()[0]; 561 auto loc = boxrank.getLoc(); 562 mlir::Type ty = convertType(boxrank.getType()); 563 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox); 564 rewriter.replaceOp(boxrank, result); 565 return mlir::success(); 566 } 567 }; 568 569 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 570 /// boxproc. 571 /// TODO: Part of supporting Fortran 2003 procedure pointers. 572 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 573 using FIROpConversion::FIROpConversion; 574 575 mlir::LogicalResult 576 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 577 mlir::ConversionPatternRewriter &rewriter) const override { 578 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 579 return mlir::failure(); 580 } 581 }; 582 583 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 584 /// descriptor from the box. 
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    // The field is loaded as an integer; reinterpret it as a pointer to the
    // type descriptor.
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
/// Three attribute forms are handled: a StringAttr becomes a single constant;
/// a DenseElementsAttr becomes an array constant; an ArrayAttr is expanded
/// element by element into insertvalue operations.
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      // Unsupported attribute kind for a string literal.
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Convert each result type; operands come pre-converted from the adaptor
    // and all original attributes (including the callee) are carried over.
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

/// Return the element type of a complex value, accepting either the builtin
/// MLIR complex type or the FIR complex type.
static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparison are done on the real component only.
674 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 675 using FIROpConversion::FIROpConversion; 676 677 mlir::LogicalResult 678 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 679 mlir::ConversionPatternRewriter &rewriter) const override { 680 mlir::ValueRange operands = adaptor.getOperands(); 681 mlir::MLIRContext *ctxt = cmp.getContext(); 682 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 683 mlir::Type resTy = convertType(cmp.getType()); 684 mlir::Location loc = cmp.getLoc(); 685 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 686 llvm::SmallVector<mlir::Value, 2> rp = { 687 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 688 pos0), 689 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 690 pos0)}; 691 auto rcp = 692 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 693 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 694 llvm::SmallVector<mlir::Value, 2> ip = { 695 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 696 pos1), 697 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 698 pos1)}; 699 auto icp = 700 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 701 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp}; 702 switch (cmp.getPredicate()) { 703 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 704 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 705 break; 706 case mlir::arith::CmpFPredicate::UNE: // .NE. 
707 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 708 break; 709 default: 710 rewriter.replaceOp(cmp, rcp.getResult()); 711 break; 712 } 713 return mlir::success(); 714 } 715 }; 716 717 /// Lower complex constants 718 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 719 using FIROpConversion::FIROpConversion; 720 721 mlir::LogicalResult 722 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 723 mlir::ConversionPatternRewriter &rewriter) const override { 724 mlir::Location loc = conc.getLoc(); 725 mlir::MLIRContext *ctx = conc.getContext(); 726 mlir::Type ty = convertType(conc.getType()); 727 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 728 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 729 auto realPart = 730 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 731 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 732 auto imPart = 733 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 734 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 735 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 736 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 737 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 738 loc, ty, undef, realPart, realIndex); 739 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 740 imPart, imIndex); 741 return mlir::success(); 742 } 743 744 inline llvm::APFloat getValue(mlir::Attribute attr) const { 745 return attr.cast<fir::RealAttr>().getValue(); 746 } 747 }; 748 749 /// convert value of from-type to value of to-type 750 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 751 using FIROpConversion::FIROpConversion; 752 753 static bool isFloatingPointTy(mlir::Type ty) { 754 return ty.isa<mlir::FloatType>(); 755 } 756 757 mlir::LogicalResult 758 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 759 
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    // Identical lowered types: the conversion is a no-op.
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    // Helper shared by the scalar and complex cases: widen or narrow a
    // floating-point value based on the bit widths of source and target.
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      // Rebuild the target complex struct from the converted parts.
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      // Floating point to integer conversion (signed).
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        // Equal widths would have matched the fromTy == toTy early return.
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    // No rule matched: report the unsupported conversion pair.
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation.
A virtual call to a method in a dispatch 876 /// table. 877 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 878 using FIROpConversion::FIROpConversion; 879 880 mlir::LogicalResult 881 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 882 mlir::ConversionPatternRewriter &rewriter) const override { 883 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 884 return mlir::failure(); 885 } 886 }; 887 888 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 889 /// derived type. 890 struct DispatchTableOpConversion 891 : public FIROpConversion<fir::DispatchTableOp> { 892 using FIROpConversion::FIROpConversion; 893 894 mlir::LogicalResult 895 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 896 mlir::ConversionPatternRewriter &rewriter) const override { 897 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 898 return mlir::failure(); 899 } 900 }; 901 902 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 903 /// method-name to a function. 904 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 905 using FIROpConversion::FIROpConversion; 906 907 mlir::LogicalResult 908 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 909 mlir::ConversionPatternRewriter &rewriter) const override { 910 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 911 return mlir::failure(); 912 } 913 }; 914 915 /// Lower `fir.global_len` operation. 
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Not implemented yet: emit a TODO diagnostic for this operation.
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // NOTE(review): unlike the sibling stubs above there is no
    // `return mlir::failure();` after TODO here — this relies on the TODO
    // macro not returning; confirm the macro is [[noreturn]] in all builds.
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
944 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 945 using FIROpConversion::FIROpConversion; 946 947 mlir::LogicalResult 948 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 949 mlir::ConversionPatternRewriter &rewriter) const override { 950 mlir::ValueRange operands = adaptor.getOperands(); 951 auto *ctx = emboxChar.getContext(); 952 953 mlir::Value charBuffer = operands[0]; 954 mlir::Value charBufferLen = operands[1]; 955 956 mlir::Location loc = emboxChar.getLoc(); 957 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 958 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 959 960 mlir::Type lenTy = 961 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 962 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 963 964 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 965 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 966 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 967 loc, llvmStructTy, llvmStruct, charBuffer, c0); 968 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 969 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 970 971 return mlir::success(); 972 } 973 }; 974 } // namespace 975 976 /// Return the LLVMFuncOp corresponding to the standard malloc call. 
977 static mlir::LLVM::LLVMFuncOp 978 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 979 auto module = op->getParentOfType<mlir::ModuleOp>(); 980 if (mlir::LLVM::LLVMFuncOp mallocFunc = 981 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 982 return mallocFunc; 983 mlir::OpBuilder moduleBuilder( 984 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 985 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 986 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 987 rewriter.getUnknownLoc(), "malloc", 988 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 989 indexType, 990 /*isVarArg=*/false)); 991 } 992 993 /// Helper function for generating the LLVM IR that computes the size 994 /// in bytes for a derived type. 995 static mlir::Value 996 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 997 mlir::ConversionPatternRewriter &rewriter) { 998 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 999 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1000 llvm::SmallVector<mlir::Value> args = {one}; 1001 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 1002 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 1003 } 1004 1005 namespace { 1006 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1007 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1008 using FIROpConversion::FIROpConversion; 1009 1010 mlir::LogicalResult 1011 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1012 mlir::ConversionPatternRewriter &rewriter) const override { 1013 mlir::Type heapTy = heap.getType(); 1014 mlir::Type ty = convertType(heapTy); 1015 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1016 mlir::Location loc = heap.getLoc(); 1017 auto ity = lowerTy().indexType(); 1018 mlir::Type dataTy = fir::unwrapRefType(heapTy); 1019 if 
(fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1020 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1021 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1022 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1023 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1024 for (mlir::Value opnd : adaptor.getOperands()) 1025 size = rewriter.create<mlir::LLVM::MulOp>( 1026 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1027 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1028 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1029 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1030 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1031 malloc.getResult(0)); 1032 return mlir::success(); 1033 } 1034 1035 // Compute the (allocation) size of the allocmem type in bytes. 1036 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1037 mlir::ConversionPatternRewriter &rewriter, 1038 mlir::Type llTy) const { 1039 // Use the primitive size, if available. 1040 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1041 if (auto size = 1042 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1043 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1044 1045 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1046 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1047 } 1048 }; 1049 } // namespace 1050 1051 /// Return the LLVMFuncOp corresponding to the standard free call. 
1052 static mlir::LLVM::LLVMFuncOp 1053 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1054 auto module = op->getParentOfType<mlir::ModuleOp>(); 1055 if (mlir::LLVM::LLVMFuncOp freeFunc = 1056 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1057 return freeFunc; 1058 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1059 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1060 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1061 rewriter.getUnknownLoc(), "free", 1062 mlir::LLVM::LLVMFunctionType::get(voidType, 1063 getVoidPtrType(op.getContext()), 1064 /*isVarArg=*/false)); 1065 } 1066 1067 namespace { 1068 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1069 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1070 using FIROpConversion::FIROpConversion; 1071 1072 mlir::LogicalResult 1073 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1074 mlir::ConversionPatternRewriter &rewriter) const override { 1075 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1076 mlir::Location loc = freemem.getLoc(); 1077 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1078 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1079 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1080 rewriter.create<mlir::LLVM::CallOp>( 1081 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1082 rewriter.eraseOp(freemem); 1083 return mlir::success(); 1084 } 1085 }; 1086 } // namespace 1087 1088 /// Common base class for embox to descriptor conversion. 1089 template <typename OP> 1090 struct EmboxCommonConversion : public FIROpConversion<OP> { 1091 using FIROpConversion<OP>::FIROpConversion; 1092 1093 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1094 // The order to find the LLVMFuncOp is as follows: 1095 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1096 // 2. 
  // 2. The first ancestor that is a LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
  // The alloca is placed at the start of the function entry block so it does
  // not end up inside a loop; the caller's insertion point is preserved.
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  // Map the boxed element type to its CFI_attribute_* value (see
  // flang/ISO_Fortran_binding.h).
  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  // Return the derived (record) type boxed by \p boxTy, or null if the box
  // does not wrap a derived type.
  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return static_cast<bool>(unwrapIfDerived(boxTy));
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    // Each do* lambda returns the {byte size, CFI type code} pair for one
    // category of element type.
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      // A complex value holds two parts of the given width.
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      // Wider character kinds: byte size = (width / 8) * len.
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8);
      auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      // Dynamic length: the last length parameter carries the length.
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      // Element size computed in IR via the null-pointer GEP trick.
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    // Coerce the value to the field type, either by bitcast (pointers) or by
    // an integer cast.
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created by
  /// lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
    if (!this->options.ignoreMissingTypeDescriptors &&
        !fir::NameUniquer::belongsToModule(
            name, Fortran::semantics::typeInfoBuiltinModule))
      fir::emitFatalError(
          loc, "runtime derived type info descriptor was not generated");
    // Fall back to a null pointer when the descriptor is legitimately absent.
    return rewriter.create<mlir::LLVM::NullOp>(
        loc, ::getVoidPtrType(box.getContext()));
  }

  // Build the rank-independent prefix of the descriptor (element size,
  // version, rank, type code, attribute, addendum flag, and optional type
  // descriptor) and return {box type, partially built descriptor, eleSize}.
  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    // For xembox/xrebox-style ops, a substring on a dynamically sized element
    // contributes its length as an extra type parameter.
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      // The type-descriptor field position depends on whether the box carries
      // a dims array.
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a scalar
  /// string and the zero based string lower bound.
  mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Location loc, mlir::Value base,
                                 mlir::Value lowerBound) const {
    llvm::SmallVector<mlir::Value> gepOperands;
    auto baseType =
        base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
    // Pointers to arrays need a leading 0 index before the element offset.
    if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
      auto idxTy = this->lowerTy().indexType();
      gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0));
      gepOperands.push_back(lowerBound);
    } else {
      gepOperands.push_back(lowerBound);
    }
    return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
  }

  /// If the embox is not in a globalOp body, allocate storage for the box;
  /// store the value inside and return the generated alloca. Return the input
  /// value otherwise.
1399 mlir::Value 1400 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1401 mlir::Location loc, mlir::Value boxValue) const { 1402 auto *thisBlock = rewriter.getInsertionBlock(); 1403 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1404 return boxValue; 1405 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1406 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1407 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1408 return alloca; 1409 } 1410 }; 1411 1412 /// Compute the extent of a triplet slice (lb:ub:step). 1413 static mlir::Value 1414 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1415 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1416 mlir::Value step, mlir::Value zero, mlir::Type type) { 1417 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1418 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1419 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1420 // If the resulting extent is negative (`ub-lb` and `step` have different 1421 // signs), zero must be returned instead. 1422 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1423 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1424 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1425 } 1426 1427 /// Create a generic box on a memory reference. This conversions lowers the 1428 /// abstract box to the appropriate, initialized descriptor. 
struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Scalar embox: the descriptor has rank 0 and carries no dims triples.
    assert(!embox.getShape() && "There should be no dims on this embox op");
    auto [boxTy, dest, eleSize] =
        consDescriptorPrefix(embox, rewriter, /*rank=*/0,
                             /*lenParams=*/adaptor.getOperands().drop_front(1));
    dest = insertBaseAddress(rewriter, embox.getLoc(), dest,
                             adaptor.getOperands()[0]);
    if (isDerivedTypeWithLenParams(boxTy)) {
      TODO(embox.getLoc(),
           "fir.embox codegen of derived with length parameters");
      return mlir::failure();
    }
    auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest);
    rewriter.replaceOp(embox, result);
    return mlir::success();
  }
};

/// Create a generic box on a memory reference.
struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto [boxTy, dest, eleSize] = consDescriptorPrefix(
        xbox, rewriter, xbox.getOutRank(),
        adaptor.getOperands().drop_front(xbox.lenParamOffset()));
    // Generate the triples in the dims field of the descriptor
    mlir::ValueRange operands = adaptor.getOperands();
    auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64);
    mlir::Value base = operands[0];
    assert(!xbox.shape().empty() && "must have a shape");
    // Offsets of the variadic operand groups within the flattened operands.
    unsigned shapeOffset = xbox.shapeOffset();
    bool hasShift = !xbox.shift().empty();
    unsigned shiftOffset = xbox.shiftOffset();
    bool hasSlice = !xbox.slice().empty();
    unsigned sliceOffset = xbox.sliceOffset();
    mlir::Location loc =
xbox.getLoc();
    mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
    mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
    mlir::Value prevPtrOff = one;
    mlir::Type eleTy = boxTy.getEleTy();
    const unsigned rank = xbox.getRank();
    llvm::SmallVector<mlir::Value> gepArgs;
    unsigned constRows = 0;
    mlir::Value ptrOffset = zero;
    mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
    assert(memEleTy.isa<fir::SequenceType>());
    auto seqTy = memEleTy.cast<fir::SequenceType>();
    mlir::Type seqEleTy = seqTy.getEleTy();
    // Adjust the element scaling factor if the element is a dependent type.
    if (fir::hasDynamicSize(seqEleTy)) {
      if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) {
        // CHARACTER(len): scale by the dynamic length times the kind's byte
        // width.
        assert(xbox.lenParams().size() == 1);
        mlir::LLVM::ConstantOp charSize = genConstantIndex(
            loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8);
        mlir::Value castedLen =
            integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]);
        auto byteOffset =
            rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen);
        prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset);
      } else if (seqEleTy.isa<fir::RecordType>()) {
        // prevPtrOff = ;
        TODO(loc, "generate call to calculate size of PDT");
      } else {
        fir::emitFatalError(loc, "unexpected dynamic type");
      }
    } else {
      constRows = seqTy.getConstantRows();
    }

    const auto hasSubcomp = !xbox.subcomponent().empty();
    const bool hasSubstr = !xbox.substr().empty();
    /// Compute initial element stride that will be use to compute the step in
    /// each dimension.
    mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize);
    if (hasSubcomp) {
      // We have a subcomponent. The step value needs to be the number of
      // bytes per element (which is a derived type).
      auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy));
      prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
    } else if (hasSubstr) {
      // We have a substring. The step value needs to be the number of bytes
      // per CHARACTER element.
      auto charTy = seqEleTy.cast<fir::CharacterType>();
      if (fir::hasDynamicSize(charTy)) {
        prevDimByteStride = prevPtrOff;
      } else {
        prevDimByteStride = genConstantIndex(
            loc, i64Ty, rewriter,
            charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
      }
    }

    // Process the array subspace arguments (shape, shift, etc.), if any,
    // translating everything to values in the descriptor wherever the entity
    // has a dynamic array dimension.
    for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
      mlir::Value extent = operands[shapeOffset];
      mlir::Value outerExtent = extent;
      bool skipNext = false;
      if (hasSlice) {
        // Accumulate the byte offset (or GEP index) contributed by the slice
        // origin in this dimension.
        mlir::Value off = operands[sliceOffset];
        mlir::Value adj = one;
        if (hasShift)
          adj = operands[shiftOffset];
        auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
        if (constRows > 0) {
          gepArgs.push_back(ao);
        } else {
          auto dimOff =
              rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
          ptrOffset =
              rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
        }
        if (mlir::isa_and_nonnull<fir::UndefOp>(
                xbox.slice()[3 * di + 1].getDefiningOp())) {
          // This dimension contains a scalar expression in the array slice op.
          // The dimension is loop invariant, will be dropped, and will not
          // appear in the descriptor.
          skipNext = true;
        }
      }
      if (!skipNext) {
        // store extent
        if (hasSlice)
          extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
                                        operands[sliceOffset + 1],
                                        operands[sliceOffset + 2], zero, i64Ty);
        // Lower bound is normalized to 0 for BIND(C) interoperability.
        mlir::Value lb = zero;
        const bool isaPointerOrAllocatable =
            eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
        // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and
        // denormalized descriptors.
        if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
          lb = one;
        // If there is a shifted origin, and no fir.slice, and this is not
        // a normalized descriptor then use the value from the shift op as
        // the lower bound.
        if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) &&
            (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) {
          lb = operands[shiftOffset];
          // An empty dimension keeps the default lower bound of 1.
          auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
              loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
          lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one,
                                                     lb);
        }
        dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);

        dest = insertExtent(rewriter, loc, dest, descIdx, extent);

        // store step (scaled by shaped extent)
        mlir::Value step = prevDimByteStride;
        if (hasSlice)
          step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
                                                    operands[sliceOffset + 2]);
        dest = insertStride(rewriter, loc, dest, descIdx, step);
        ++descIdx;
      }

      // compute the stride and offset for the next natural dimension
      prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>(
          loc, i64Ty, prevDimByteStride, outerExtent);
      if (constRows == 0)
        prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
                                                        outerExtent);
      else
        --constRows;

      // increment iterators
      ++shapeOffset;
      if
(hasShift) 1608 ++shiftOffset; 1609 if (hasSlice) 1610 sliceOffset += 3; 1611 } 1612 if (hasSlice || hasSubcomp || hasSubstr) { 1613 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1614 args.append(gepArgs.rbegin(), gepArgs.rend()); 1615 if (hasSubcomp) { 1616 // For each field in the path add the offset to base via the args list. 1617 // In the most general case, some offsets must be computed since 1618 // they are not be known until runtime. 1619 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1620 fir::unwrapPassByRefType(xbox.memref().getType())))) 1621 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1622 args.append(operands.begin() + xbox.subcomponentOffset(), 1623 operands.begin() + xbox.subcomponentOffset() + 1624 xbox.subcomponent().size()); 1625 } 1626 base = 1627 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1628 if (hasSubstr) 1629 base = shiftSubstringBase(rewriter, loc, base, 1630 operands[xbox.substrOffset()]); 1631 } 1632 dest = insertBaseAddress(rewriter, loc, dest, base); 1633 if (isDerivedTypeWithLenParams(boxTy)) 1634 TODO(loc, "fir.embox codegen of derived with length parameters"); 1635 1636 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1637 rewriter.replaceOp(xbox, result); 1638 return mlir::success(); 1639 } 1640 1641 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1642 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1643 /// zero origin lower bound for interoperability with BIND(C). 1644 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1645 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1646 } 1647 }; 1648 1649 /// Create a new box given a box reference. 
struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
  using EmboxCommonConversion::EmboxCommonConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = rebox.getLoc();
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Value loweredBox = adaptor.getOperands()[0];
    mlir::ValueRange operands = adaptor.getOperands();

    // Create new descriptor and fill its non-shape related data.
    llvm::SmallVector<mlir::Value, 2> lenParams;
    mlir::Type inputEleTy = getInputEleTy(rebox);
    if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) {
      // Recover the CHARACTER length from the input descriptor: element size
      // in bytes divided by the character width for kinds other than 1.
      mlir::Value len =
          loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter);
      if (charTy.getFKind() != 1) {
        mlir::Value width =
            genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
        len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
      }
      lenParams.emplace_back(len);
    } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) {
      if (recTy.getNumLenParams() != 0)
        TODO(loc, "reboxing descriptor of derived type with length parameters");
    }
    auto [boxTy, dest, eleSize] =
        consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams);

    // Read input extents, strides, and base address from the input box.
    llvm::SmallVector<mlir::Value> inputExtents;
    llvm::SmallVector<mlir::Value> inputStrides;
    const unsigned inputRank = rebox.getRank();
    for (unsigned i = 0; i < inputRank; ++i) {
      mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i);
      // dimInfo is {lower bound, extent, stride}; only extent and stride are
      // needed here.
      llvm::SmallVector<mlir::Value, 3> dimInfo =
          getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter);
      inputExtents.emplace_back(dimInfo[1]);
      inputStrides.emplace_back(dimInfo[2]);
    }

    mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType());
    mlir::Value baseAddr =
        loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter);

    // A rebox either applies a slice/subcomponent path or a new shape, never
    // both (see sliceBox/reshapeBox).
    if (!rebox.slice().empty() || !rebox.subcomponent().empty())
      return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides,
                      operands, rewriter);
    return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides,
                      operands, rewriter);
  }

private:
  /// Write resulting shape and base address in descriptor, and replace rebox
  /// op.
  mlir::LogicalResult
  finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
                mlir::ValueRange lbounds, mlir::ValueRange extents,
                mlir::ValueRange strides,
                mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Location loc = rebox.getLoc();
    mlir::Value zero =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
    for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
      mlir::Value extent = std::get<0>(iter.value());
      unsigned dim = iter.index();
      // Lower bound defaults to 1; an explicit lower bound is overridden back
      // to 1 when the dimension is empty.
      mlir::Value lb = one;
      if (!lbounds.empty()) {
        lb = lbounds[dim];
        auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, extent, zero);
        lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb);
      };
      dest = insertLowerBound(rewriter, loc, dest, dim, lb);
      dest = insertExtent(rewriter, loc, dest, dim, extent);
      dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
    }
    dest = insertBaseAddress(rewriter, loc, dest, base);
    mlir::Value result =
        placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest);
    rewriter.replaceOp(rebox, result);
    return mlir::success();
  }

  // Apply slice given the base address, extents and strides of the input box.
  mlir::LogicalResult
  sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
           mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
           mlir::ValueRange operands,
           mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Location loc = rebox.getLoc();
    mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
    // Apply subcomponent and substring shift on base address.
    if (!rebox.subcomponent().empty() || !rebox.substr().empty()) {
      // Cast to inputEleTy* so that a GEP can be used.
      mlir::Type inputEleTy = getInputEleTy(rebox);
      auto llvmElePtrTy =
          mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy));
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);

      if (!rebox.subcomponent().empty()) {
        llvm::SmallVector<mlir::Value> gepOperands = {zero};
        for (unsigned i = 0; i < rebox.subcomponent().size(); ++i)
          gepOperands.push_back(operands[rebox.subcomponentOffset() + i]);
        base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands);
      }
      if (!rebox.substr().empty())
        base = shiftSubstringBase(rewriter, loc, base,
                                  operands[rebox.substrOffset()]);
    }

    if (rebox.slice().empty())
      // The array section is of the form array[%component][substring], keep
      // the input array extents and strides.
      return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
                           inputExtents, inputStrides, rewriter);

    // Strides from the fir.box are in bytes.
    base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);

    // The slice is of the form array(i:j:k)[%component]. Compute new extents
    // and strides.
    llvm::SmallVector<mlir::Value> slicedExtents;
    llvm::SmallVector<mlir::Value> slicedStrides;
    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
    const bool sliceHasOrigins = !rebox.shift().empty();
    unsigned sliceOps = rebox.sliceOffset();
    unsigned shiftOps = rebox.shiftOffset();
    auto strideOps = inputStrides.begin();
    const unsigned inputRank = inputStrides.size();
    for (unsigned i = 0; i < inputRank;
         ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
      mlir::Value sliceLb =
          integerCast(loc, rewriter, idxTy, operands[sliceOps]);
      mlir::Value inputStride = *strideOps; // already idxTy
      // Apply origin shift: base += (lb-shift)*input_stride
      mlir::Value sliceOrigin =
          sliceHasOrigins
              ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
              : one;
      mlir::Value diff =
          rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
      mlir::Value offset =
          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
      base = genGEP(loc, voidPtrTy, rewriter, base, offset);
      // Apply upper bound and step if this is a triplet. Otherwise, the
      // dimension is dropped and no extents/strides are computed.
      mlir::Value upper = operands[sliceOps + 1];
      const bool isTripletSlice =
          !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
      if (isTripletSlice) {
        mlir::Value step =
            integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
        // extent = (ub-lb+step)/step
        mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
        mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
                                                  sliceUb, step, zero, idxTy);
        slicedExtents.emplace_back(extent);
        // stride = step*input_stride
        mlir::Value stride =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
        slicedStrides.emplace_back(stride);
      }
    }
    return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
                         slicedExtents, slicedStrides, rewriter);
  }

  /// Apply a new shape to the data described by a box given the base address,
  /// extents and strides of the box.
  mlir::LogicalResult
  reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
             mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
             mlir::ValueRange operands,
             mlir::ConversionPatternRewriter &rewriter) const {
    mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
                                 operands.begin() + rebox.shiftOffset() +
                                     rebox.shift().size()};
    if (rebox.shape().empty()) {
      // Only setting new lower bounds.
      return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
                           inputStrides, rewriter);
    }

    mlir::Location loc = rebox.getLoc();
    // Strides from the fir.box are in bytes.
    mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
    base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);

    llvm::SmallVector<mlir::Value> newStrides;
    llvm::SmallVector<mlir::Value> newExtents;
    mlir::Type idxTy = lowerTy().indexType();
    // First stride from input box is kept. The rest is assumed contiguous
    // (it is not possible to reshape otherwise). If the input is scalar,
    // which may be OK if all new extents are ones, the stride does not
    // matter, use one.
    mlir::Value stride = inputStrides.empty()
                             ? genConstantIndex(loc, idxTy, rewriter, 1)
                             : inputStrides[0];
    for (unsigned i = 0; i < rebox.shape().size(); ++i) {
      mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
      mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
      newExtents.emplace_back(extent);
      newStrides.emplace_back(stride);
      // nextStride = extent * stride;
      stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
    }
    return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
                         rewriter);
  }

  /// Return scalar element type of the input box.
  static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
    auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
    if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
      return seqTy.getEleTy();
    return ty;
  }
};

/// Lower `fir.emboxproc` operation. Creates a procedure box.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // Not implemented yet: report and fail the conversion.
    TODO(emboxproc.getLoc(), "fir.emboxproc codegen");
    return mlir::failure();
  }
};

// Code shared between insert_value and extract_value Ops.
struct ValueOpCommon {
  // Translate the arguments pertaining to any multidimensional array to
  // row-major order for LLVM-IR. FIR arrays are column-major, so the index
  // group addressing each multi-dimensional array is reversed in place.
  static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs,
                         mlir::Type ty) {
    assert(ty && "type is null");
    const auto end = attrs.size();
    for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) {
      if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        // Reverse the `dim` consecutive indices addressing this array.
        const auto dim = getDimension(seq);
        if (dim > 1) {
          auto ub = std::min(i + dim, end);
          std::reverse(attrs.begin() + i, attrs.begin() + ub);
          i += dim - 1;
        }
        ty = getArrayElementType(seq);
      } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        // Step into the struct member selected by the index.
        ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()];
      } else {
        llvm_unreachable("index into invalid type");
      }
    }
  }

  /// Normalize a coordinate attribute list to integer indices: field-name
  /// (StringAttr) + type (TypeAttr) pairs are resolved to the field's index
  /// in the record type; integer coordinates pass through unchanged.
  static llvm::SmallVector<mlir::Attribute>
  collectIndices(mlir::ConversionPatternRewriter &rewriter,
                 mlir::ArrayAttr arrAttr) {
    llvm::SmallVector<mlir::Attribute> attrs;
    for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) {
      if (i->isa<mlir::IntegerAttr>()) {
        attrs.push_back(*i);
      } else {
        auto fieldName = i->cast<mlir::StringAttr>().getValue();
        ++i;
        auto ty = i->cast<mlir::TypeAttr>().getValue();
        auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName);
        attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index));
      }
    }
    return attrs;
  }

private:
  /// Number of nested LLVM array types rooted at `ty` (its dimensionality).
  static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
    unsigned result = 1;
    for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>();
         eleTy;
         eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>())
      ++result;
    return result;
  }

  /// Innermost non-array element type of a (possibly nested) LLVM array.
  static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) {
    auto eleTy = ty.getElementType();
    while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
      eleTy = arrTy.getElementType();
    return eleTy;
  }
};

namespace {
/// Extract a subobject value from an ssa-value of aggregate type
struct ExtractValueOpConversion
    : public FIROpAndTypeConversion<fir::ExtractValueOp>,
      public ValueOpCommon {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    // Resolve field names to indices, then reorder array indices to LLVM's
    // row-major convention before emitting llvm.extractvalue.
    auto attrs = collectIndices(rewriter, extractVal.getCoor());
    toRowMajor(attrs, adaptor.getOperands()[0].getType());
    auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(
        extractVal, ty, adaptor.getOperands()[0], position);
    return mlir::success();
  }
};

/// InsertValue is the generalized instruction for the composition of new
/// aggregate type values.
struct InsertValueOpConversion
    : public FIROpAndTypeConversion<fir::InsertValueOp>,
      public ValueOpCommon {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    // Same index normalization as extract_value; operand 0 is the aggregate,
    // operand 1 the value being inserted.
    auto attrs = collectIndices(rewriter, insertVal.getCoor());
    toRowMajor(attrs, adaptor.getOperands()[0].getType());
    auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1],
        position);
    return mlir::success();
  }
};

/// InsertOnRange inserts a value into a sequence over a range of offsets.
struct InsertOnRangeOpConversion
    : public FIROpAndTypeConversion<fir::InsertOnRangeOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  // Increments an array of subscripts in a row major fashion.
  void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims,
                           llvm::SmallVector<uint64_t> &subscripts) const {
    // Carry-propagating increment from the last (fastest-varying) dimension.
    for (size_t i = dims.size(); i > 0; --i) {
      if (++subscripts[i - 1] < dims[i - 1]) {
        return;
      }
      subscripts[i - 1] = 0;
    }
  }

  mlir::LogicalResult
  doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {

    llvm::SmallVector<uint64_t> dims;
    auto type = adaptor.getOperands()[0].getType();

    // Iteratively extract the array dimensions from the type.
    while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
      dims.push_back(t.getNumElements());
      type = t.getElementType();
    }

    llvm::SmallVector<std::uint64_t> lBounds;
    llvm::SmallVector<std::uint64_t> uBounds;

    // Unzip the upper and lower bound and convert to a row major format.
    mlir::DenseIntElementsAttr coor = range.getCoor();
    auto reversedCoor = llvm::reverse(coor.getValues<int64_t>());
    for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) {
      uBounds.push_back(*i++);
      lBounds.push_back(*i);
    }

    // The lower bounds double as the running subscript vector, walked up to
    // the upper bounds one element at a time.
    auto &subscripts = lBounds;
    auto loc = range.getLoc();
    mlir::Value lastOp = adaptor.getOperands()[0];
    mlir::Value insertVal = adaptor.getOperands()[1];

    auto i64Ty = rewriter.getI64Type();
    while (subscripts != uBounds) {
      // Convert uint64_t's to Attribute's.
      llvm::SmallVector<mlir::Attribute> subscriptAttrs;
      for (const auto &subscript : subscripts)
        subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript));
      lastOp = rewriter.create<mlir::LLVM::InsertValueOp>(
          loc, ty, lastOp, insertVal,
          mlir::ArrayAttr::get(range.getContext(), subscriptAttrs));

      incrementSubscripts(dims, subscripts);
    }

    // Emit the final insertion (at the upper bound) as the replacement op.
    // Convert uint64_t's to Attribute's.
    llvm::SmallVector<mlir::Attribute> subscriptAttrs;
    for (const auto &subscript : subscripts)
      subscriptAttrs.push_back(
          mlir::IntegerAttr::get(rewriter.getI64Type(), subscript));
    mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs);

    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        range, ty, lastOp, insertVal,
        mlir::ArrayAttr::get(range.getContext(), arrayRef));

    return mlir::success();
  }
};
} // namespace

namespace {
/// XArrayCoor is the address arithmetic on a dynamically shaped, sliced,
/// shifted etc. array.
/// (See the static restriction on coordinate_of.) array_coor determines the
/// coordinate (location) of a specific element.
struct XArrayCoorOpConversion
    : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  mlir::LogicalResult
  doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    auto loc = coor.getLoc();
    mlir::ValueRange operands = adaptor.getOperands();
    unsigned rank = coor.getRank();
    assert(coor.indices().size() == rank);
    assert(coor.shape().empty() || coor.shape().size() == rank);
    assert(coor.shift().empty() || coor.shift().size() == rank);
    assert(coor.slice().empty() || coor.slice().size() == 3 * rank);
    mlir::Type idxTy = lowerTy().indexType();
    unsigned indexOffset = coor.indicesOffset();
    unsigned shapeOffset = coor.shapeOffset();
    unsigned shiftOffset = coor.shiftOffset();
    unsigned sliceOffset = coor.sliceOffset();
    auto sliceOps = coor.slice().begin();
    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
    // Running stride in element units (only used for non-boxed bases).
    mlir::Value prevExt = one;
    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
    mlir::Value offset = zero;
    const bool isShifted = !coor.shift().empty();
    const bool isSliced = !coor.slice().empty();
    const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();

    // For each dimension of the array, generate the offset calculation.
    for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset,
                  ++shiftOffset, sliceOffset += 3, sliceOps += 3) {
      mlir::Value index =
          integerCast(loc, rewriter, idxTy, operands[indexOffset]);
      mlir::Value lb =
          isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset])
                    : one;
      mlir::Value step = one;
      bool normalSlice = isSliced;
      // Compute zero based index in dimension i of the element, applying
      // potential triplets and lower bounds.
      if (isSliced) {
        // An undef upper bound marks a scalar (non-triplet) slice dimension.
        mlir::Value originalUb = *(sliceOps + 1);
        normalSlice =
            !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp());
        if (normalSlice)
          step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]);
      }
      auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
      mlir::Value diff =
          rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step);
      if (normalSlice) {
        mlir::Value sliceLb =
            integerCast(loc, rewriter, idxTy, operands[sliceOffset]);
        auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb);
        diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj);
      }
      // Update the offset given the stride and the zero based index `diff`
      // that was just computed.
      if (baseIsBoxed) {
        // Use stride in bytes from the descriptor.
        mlir::Value stride =
            loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
        offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
      } else {
        // Use stride computed at last iteration.
        auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
        offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
        // Compute next stride assuming contiguity of the base array
        // (in element number).
        auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]);
        prevExt =
            rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
      }
    }

    // Add computed offset to the base address.
    if (baseIsBoxed) {
      // Working with byte offsets. The base address is read from the fir.box.
      // and need to be casted to i8* to do the pointer arithmetic.
      mlir::Type baseTy =
          getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
      mlir::Value base =
          loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
      mlir::Type voidPtrTy = getVoidPtrType();
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
      llvm::SmallVector<mlir::Value> args{offset};
      auto addr =
          rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
      if (coor.subcomponent().empty()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
        return mlir::success();
      }
      auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
      args.clear();
      args.push_back(zero);
      if (!coor.lenParams().empty()) {
        // If type parameters are present, then we don't want to use a GEPOp
        // as below, as the LLVM struct type cannot be statically defined.
        TODO(loc, "derived type with type parameters");
      }
      // TODO: array offset subcomponents must be converted to LLVM's
      // row-major layout here.
      for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
        args.push_back(operands[i]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
                                                     args);
      return mlir::success();
    }

    // The array was not boxed, so it must be contiguous. offset is therefore an
    // element offset and the base type is kept in the GEP unless the element
    // type size is itself dynamic.
    mlir::Value base;
    if (coor.subcomponent().empty()) {
      // No subcomponent.
      if (!coor.lenParams().empty()) {
        // Type parameters. Adjust element size explicitly.
        auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
        assert(eleTy && "result must be a reference-like type");
        if (fir::characterWithDynamicLen(eleTy)) {
          assert(coor.lenParams().size() == 1);
          auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
              eleTy.cast<fir::CharacterType>().getFKind());
          auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
          auto scaledBySize =
              rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
          auto length =
              integerCast(loc, rewriter, idxTy,
                          adaptor.getOperands()[coor.lenParamsOffset()]);
          offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
                                                      length);
        } else {
          TODO(loc, "compute size of derived type with type parameters");
        }
      }
      // Cast the base address to a pointer to T.
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
                                                    adaptor.getOperands()[0]);
    } else {
      // Operand #0 must have a pointer type. For subcomponent slicing, we
      // want to cast away the array type and have a plain struct type.
      mlir::Type ty0 = adaptor.getOperands()[0].getType();
      auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
      assert(ptrTy && "expected pointer type");
      mlir::Type eleTy = ptrTy.getElementType();
      while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
        eleTy = arrTy.getElementType();
      auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
      base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy,
                                                    adaptor.getOperands()[0]);
    }
    llvm::SmallVector<mlir::Value> args = {offset};
    for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
      args.push_back(operands[i]);
    rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
    return mlir::success();
  }
};
} // namespace

/// Convert to (memory) reference to a reference to a subobject.
/// The coordinate_of op is a Swiss army knife operation that can be used on
/// (memory) references to records, arrays, complex, etc. as well as boxes.
/// With unboxed arrays, there is the restriction that the array have a static
/// shape in all but the last column.
struct CoordinateOpConversion
    : public FIROpAndTypeConversion<fir::CoordinateOp> {
  using FIROpAndTypeConversion::FIROpAndTypeConversion;

  /// Dispatch on the base operand's type: complex values are handled inline,
  /// boxed bases go through the descriptor (doRewriteBox), and plain
  /// ref/pointer/heap bases are lowered to a GEP (doRewriteRefOrPtr).
  mlir::LogicalResult
  doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();

    mlir::Location loc = coor.getLoc();
    mlir::Value base = operands[0];
    mlir::Type baseObjectTy = coor.getBaseType();
    mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    assert(objectTy && "fir.coordinate_of expects a reference type");

    // Complex type - basically, extract the real or imaginary part
    if (fir::isa_complex(objectTy)) {
      mlir::LLVM::ConstantOp c0 =
          genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
      llvm::SmallVector<mlir::Value> offs = {c0, operands[1]};
      mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, gep);
      return mlir::success();
    }

    // Boxed type - get the base pointer from the box
    if (baseObjectTy.dyn_cast<fir::BoxType>())
      return doRewriteBox(coor, ty, operands, loc, rewriter);

    // Reference, pointer or a heap type
    if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>())
      return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter);

    return rewriter.notifyMatchFailure(
        coor, "fir.coordinate_of base operand has unsupported type");
  }

  /// Return the component index to use for `op` inside record type `ty`.
  /// For a record with dynamic size, FieldIndexOpConversion lowered the index
  /// operand to a call carrying a "field" integer attribute, which is read
  /// back here; otherwise the operand must be a constant.
  static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) {
    return fir::hasDynamicSize(ty)
               ? op.getDefiningOp()
                     ->getAttrOfType<mlir::IntegerAttr>("field")
                     .getInt()
               : getIntValue(op);
  }

  /// Extract the integer from a value defined by a constant operation
  /// (arith or LLVM dialect). Emits a fatal error otherwise.
  static int64_t getIntValue(mlir::Value val) {
    assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value");
    mlir::Operation *defop = val.getDefiningOp();

    if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop))
      return constOp.value();
    if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop))
      if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>())
        return attr.getValue().getSExtValue();
    fir::emitFatalError(val.getLoc(), "must be a constant");
  }

  /// True iff `type` has components that coordinates can index into
  /// (array, derived type or tuple).
  static bool hasSubDimensions(mlir::Type type) {
    return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>();
  }

  /// Check whether this form of `!fir.coordinate_of` is supported. These
  /// additional checks are required, because we are not yet able to convert
  /// all valid forms of `!fir.coordinate_of`.
  /// TODO: Either implement the unsupported cases or extend the verifier
  /// in FIROps.cpp instead.
  static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) {
    const std::size_t numOfCoors = coors.size();
    std::size_t i = 0;
    bool subEle = false;
    bool ptrEle = false;
    for (; i < numOfCoors; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        subEle = true;
        // An array consumes one coordinate operand per dimension.
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
        subEle = true;
        type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
      } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
        subEle = true;
        type = tupTy.getType(getIntValue(nxtOpnd));
      } else {
        ptrEle = true;
      }
    }
    // A bare pointer-like element is only supported as the sole coordinate.
    if (ptrEle)
      return (!subEle) && (numOfCoors == 1);
    return subEle && (i >= numOfCoors);
  }

  /// Walk the abstract memory layout and determine if the path traverses any
  /// array types with unknown shape. Return true iff all the array types have a
  /// constant shape along the path.
  static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) {
    for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) {
      mlir::Value nxtOpnd = coors[i];
      if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
        if (fir::sequenceWithNonConstantShape(arrTy))
          return false;
        i += arrTy.getDimension() - 1;
        type = arrTy.getEleTy();
      } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
        type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
      } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
        type = strTy.getType(getIntValue(nxtOpnd));
      } else {
        return true;
      }
    }
    return true;
  }

private:
  /// Lower a coordinate whose base is a `fir.box`. The base address and the
  /// per-dimension byte strides are loaded from the box descriptor at
  /// runtime, so addressing is done in bytes through a void pointer.
  mlir::LogicalResult
  doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
               mlir::Location loc,
               mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type boxObjTy = coor.getBaseType();
    assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");

    mlir::Value boxBaseAddr = operands[0];

    // 1. SPECIAL CASE (uses `fir.len_param_index`):
    //   %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
    //   %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
    //   %addr = coordinate_of %box, %lenp
    if (coor.getNumOperands() == 2) {
      mlir::Operation *coordinateDef =
          (*coor.getCoor().begin()).getDefiningOp();
      if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
        TODO(loc,
             "fir.coordinate_of - fir.len_param_index is not supported yet");
    }

    // 2. GENERAL CASE:
    // 2.1. (`fir.array`)
    //   %box = ... : !fir.box<!fir.array<?xU>>
    //   %idx = ... : index
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
    // 2.2 (`fir.derived`)
    //   %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
    //   %idx = ... : i32
    //   %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
    // 2.3 (`fir.derived` inside `fir.array`)
    //   %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32,
    //   field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr =
    //   coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
    // 2.4. TODO: Either document or disable any other case that the following
    //  implementation might convert.
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
    mlir::Value resultAddr =
        loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
                            boxBaseAddr, rewriter);
    // Component Type
    auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
    mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());

    for (unsigned i = 1, last = operands.size(); i < last; ++i) {
      if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
        if (i != 1)
          TODO(loc, "fir.array nested inside other array and/or derived type");
        // Applies byte strides from the box. Ignore lower bound from box
        // since fir.coordinate_of indexes are zero based. Lowering takes care
        // of lower bound aspects. This both accounts for dynamically sized
        // types and non contiguous arrays.
        auto idxTy = lowerTy().indexType();
        mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
        for (unsigned index = i, lastIndex = i + arrTy.getDimension();
             index < lastIndex; ++index) {
          mlir::Value stride =
              loadStrideFromBox(loc, operands[0], index - i, rewriter);
          auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy,
                                                       operands[index], stride);
          off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off);
        }
        // Apply the accumulated byte offset through an i8* GEP.
        auto voidPtrBase =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr);
        llvm::SmallVector<mlir::Value> args = {off};
        resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy,
                                                        voidPtrBase, args);
        i += arrTy.getDimension() - 1;
        cpnTy = arrTy.getEleTy();
      } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) {
        // Derived-type component: index into the record with a {0, field} GEP.
        auto recRefTy =
            mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy));
        mlir::Value nxtOpnd = operands[i];
        auto memObj =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr);
        llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd};
        cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        auto llvmCurrentObjTy = lowerTy().convertType(cpnTy);
        auto gep = rewriter.create<mlir::LLVM::GEPOp>(
            loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj,
            args);
        resultAddr =
            rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep);
      } else {
        fir::emitFatalError(loc, "unexpected type in coordinate_of");
      }
    }

    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr);
    return mlir::success();
  }

  /// Lower a coordinate whose base is a reference, pointer or heap value.
  /// The layout is statically known, so a single LLVM GEP is emitted.
  mlir::LogicalResult
  doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty,
                    mlir::ValueRange operands, mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Type baseObjectTy = coor.getBaseType();

    // Component Type
    mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy);
    bool hasSubdimension = hasSubDimensions(cpnTy);
    bool columnIsDeferred = !hasSubdimension;

    if (!supportedCoordinate(cpnTy, operands.drop_front(1)))
      TODO(loc, "unsupported combination of coordinate operands");

    const bool hasKnownShape =
        arraysHaveKnownShape(cpnTy, operands.drop_front(1));

    // If only the column is `?`, then we can simply place the column value in
    // the 0-th GEP position.
    if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
      if (!hasKnownShape) {
        const unsigned sz = arrTy.getDimension();
        if (arraysHaveKnownShape(arrTy.getEleTy(),
                                 operands.drop_front(1 + sz))) {
          fir::SequenceType::ShapeRef shape = arrTy.getShape();
          bool allConst = true;
          // All dimensions but the last (column) must be constant.
          for (unsigned i = 0; i < sz - 1; ++i) {
            if (shape[i] < 0) {
              allConst = false;
              break;
            }
          }
          if (allConst)
            columnIsDeferred = true;
        }
      }
    }

    if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy)))
      return mlir::emitError(
          loc, "fir.coordinate_of with a dynamic element size is unsupported");

    if (hasKnownShape || columnIsDeferred) {
      llvm::SmallVector<mlir::Value> offs;
      if (hasKnownShape && hasSubdimension) {
        // Leading 0 dereferences the base pointer itself.
        mlir::LLVM::ConstantOp c0 =
            genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
        offs.push_back(c0);
      }
      // `dims` counts array dimensions still to be consumed; `arrIdx`
      // buffers the corresponding coordinate operands.
      llvm::Optional<int> dims;
      llvm::SmallVector<mlir::Value> arrIdx;
      for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) {
        mlir::Value nxtOpnd = operands[i];

        if (!cpnTy)
          return mlir::emitError(loc, "invalid coordinate/check failed");

        // check if the i-th coordinate relates to an array
        if (dims) {
          arrIdx.push_back(nxtOpnd);
          int dimsLeft = *dims;
          if (dimsLeft > 1) {
            dims = dimsLeft - 1;
            continue;
          }
          cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy();
          // append array range in reverse (FIR arrays are column-major)
          offs.append(arrIdx.rbegin(), arrIdx.rend());
          arrIdx.clear();
          dims.reset();
          continue;
        }
        if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
          int d = arrTy.getDimension() - 1;
          if (d > 0) {
            dims = d;
            arrIdx.push_back(nxtOpnd);
            continue;
          }
          cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy();
          offs.push_back(nxtOpnd);
          continue;
        }

        // check if the i-th coordinate relates to a field
        if (auto recTy = cpnTy.dyn_cast<fir::RecordType>())
          cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd));
        else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>())
          cpnTy = tupTy.getType(getIntValue(nxtOpnd));
        else
          cpnTy = nullptr;

        offs.push_back(nxtOpnd);
      }
      // Flush indices of a trailing array path.
      if (dims)
        offs.append(arrIdx.rbegin(), arrIdx.rend());
      mlir::Value base = operands[0];
      mlir::Value retval = genGEP(loc, ty, rewriter, base, offs);
      rewriter.replaceOp(coor, retval);
      return mlir::success();
    }

    return mlir::emitError(
        loc, "fir.coordinate_of base operand has unsupported type");
  }
};

/// Convert `fir.field_index`. The conversion depends on whether the size of
/// the record is static or dynamic.
struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> {
  using FIROpConversion::FIROpConversion;

  // NB: most field references should be resolved by this point
  mlir::LogicalResult
  matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto recTy = field.getOnType().cast<fir::RecordType>();
    unsigned index = recTy.getFieldIndex(field.getFieldId());

    if (!fir::hasDynamicSize(recTy)) {
      // Derived type has compile-time constant layout. Return index of the
      // component type in the parent type (to be used in GEP).
      rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
                                    field.getLoc(), rewriter, index)});
      return mlir::success();
    }

    // Derived type has a dynamic layout. Call the compiler generated function
    // to determine the byte offset of the field at runtime. This returns a
    // non-constant. The static field index is also kept as a "field"
    // attribute so that CoordinateOpConversion::getFieldNumber can recover it.
    mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
        field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
    mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
    mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
        "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        field, lowerTy().offsetType(), adaptor.getOperands(),
        llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
    return mlir::success();
  }

  // Re-construct the name of the compiler generated method that calculates the
  // offset
  inline static std::string getOffsetMethodName(fir::RecordType recTy,
                                                llvm::StringRef field) {
    return recTy.getName().str() + "P." + field.str() + ".offset";
  }
};

/// Convert `fir.end`
struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(firEnd.getLoc(), "fir.end codegen");
    return mlir::failure();
  }
};

/// Lower `fir.gentypedesc` to a global constant.
2602 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2603 using FIROpConversion::FIROpConversion; 2604 2605 mlir::LogicalResult 2606 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2607 mlir::ConversionPatternRewriter &rewriter) const override { 2608 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2609 return mlir::failure(); 2610 } 2611 }; 2612 2613 /// Lower `fir.has_value` operation to `llvm.return` operation. 2614 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2615 using FIROpConversion::FIROpConversion; 2616 2617 mlir::LogicalResult 2618 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2619 mlir::ConversionPatternRewriter &rewriter) const override { 2620 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2621 adaptor.getOperands()); 2622 return mlir::success(); 2623 } 2624 }; 2625 2626 /// Lower `fir.global` operation to `llvm.global` operation. 2627 /// `fir.insert_on_range` operations are replaced with constant dense attribute 2628 /// if they are applied on the full range. 
struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> {
  using FIROpConversion::FIROpConversion;

  /// Create the equivalent llvm.mlir.global, move the initializer region
  /// across, and fold full-range `fir.insert_on_range` initializers into a
  /// dense splat constant.
  mlir::LogicalResult
  matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto tyAttr = convertType(global.getType());
    // A box global is stored as the descriptor itself, not a pointer to it.
    if (global.getType().isa<fir::BoxType>())
      tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType();
    auto loc = global.getLoc();
    mlir::Attribute initAttr;
    if (global.getInitVal())
      initAttr = global.getInitVal().getValue();
    auto linkage = convertLinkage(global.getLinkName());
    // Presence of the unit attribute marks the global constant.
    auto isConst = global.getConstant().hasValue();
    auto g = rewriter.create<mlir::LLVM::GlobalOp>(
        loc, tyAttr, isConst, linkage, global.getSymName(), initAttr);
    auto &gr = g.getInitializerRegion();
    rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end());
    if (!gr.empty()) {
      // Replace insert_on_range with a constant dense attribute if the
      // initialization is on the full range.
      auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>();
      for (auto insertOp : insertOnRangeOps) {
        if (isFullRange(insertOp.getCoor(), insertOp.getType())) {
          auto seqTyAttr = convertType(insertOp.getType());
          auto *op = insertOp.getVal().getDefiningOp();
          auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op);
          if (!constant) {
            // The inserted value may be a constant behind a fir.convert.
            auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op);
            if (!convertOp)
              continue;
            constant = mlir::cast<mlir::arith::ConstantOp>(
                convertOp.getValue().getDefiningOp());
          }
          // Build a dense splat of the constant over the sequence shape.
          mlir::Type vecType = mlir::VectorType::get(
              insertOp.getType().getShape(), constant.getType());
          auto denseAttr = mlir::DenseElementsAttr::get(
              vecType.cast<mlir::ShapedType>(), constant.getValue());
          rewriter.setInsertionPointAfter(insertOp);
          rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>(
              insertOp, seqTyAttr, denseAttr);
        }
      }
    }
    rewriter.eraseOp(global);
    return mlir::success();
  }

  /// Return true iff `indexes` ([lo, hi] pairs, one pair per dimension)
  /// covers every element of `seqTy` (zero-based, inclusive upper bounds).
  bool isFullRange(mlir::DenseIntElementsAttr indexes,
                   fir::SequenceType seqTy) const {
    auto extents = seqTy.getShape();
    if (indexes.size() / 2 != static_cast<int64_t>(extents.size()))
      return false;
    auto cur_index = indexes.value_begin<int64_t>();
    for (unsigned i = 0; i < indexes.size(); i += 2) {
      if (*(cur_index++) != 0)
        return false;
      if (*(cur_index++) != extents[i / 2] - 1)
        return false;
    }
    return true;
  }

  // TODO: String comparison should be avoided. Replace linkName with an
  // enumeration.
  mlir::LLVM::Linkage
  convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
    if (optLinkage.hasValue()) {
      auto name = optLinkage.getValue();
      if (name == "internal")
        return mlir::LLVM::Linkage::Internal;
      if (name == "linkonce")
        return mlir::LLVM::Linkage::Linkonce;
      if (name == "linkonce_odr")
        return mlir::LLVM::Linkage::LinkonceODR;
      if (name == "common")
        return mlir::LLVM::Linkage::Common;
      if (name == "weak")
        return mlir::LLVM::Linkage::Weak;
    }
    // Absent or unrecognized link name defaults to external linkage.
    return mlir::LLVM::Linkage::External;
  }
};

/// `fir.load` --> `llvm.load`
struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // fir.box is a special case because it is considered as an ssa values in
    // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
    // and fir.box end up being the same llvm types and loading a
    // fir.ref<fir.box> is actually a no op in LLVM.
    if (load.getType().isa<fir::BoxType>()) {
      rewriter.replaceOp(load, adaptor.getOperands()[0]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
          load, convertType(load.getType()), adaptor.getOperands(),
          load->getAttrs());
    }
    return mlir::success();
  }
};

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // No LLVM equivalent yet (see TODO above); simply forward the operand.
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return mlir::success();
  }
};

/// Emit a conditional branch: to `dest` (with `destOps` if provided) when
/// `cmp` is true, otherwise to `newBlock`.
static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        llvm::Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange())
        ;
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

/// Replace `caseOp` with an unconditional branch to `dest`, forwarding
/// `destOps` when provided.
template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

/// Emit one rung of the `fir.select_case` if-then-else ladder: branch to
/// `dest` when `cmp` holds, otherwise fall through to a fresh block where
/// the next comparison will be inserted.
static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              llvm::Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  // Leave the insertion point in the fall-through block for the caller.
  rewriter.setInsertionPointToEnd(newBlock);
}

/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparison and
/// conditional branching can be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)` or an upper bound case such as `case(:3)` are converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds, otherwise it branches to the next block with the
/// comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branching for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.value().begin());
      mlir::Attribute attr = cases[t];
      // case(v): selector == v
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(v:): v <= selector
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(:v): selector <= v
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      // case(lo:hi): lo <= selector, then selector <= hi in a second block.
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      // Default case: an unconditional branch, required to be last.
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};

/// Shared lowering of `fir.select` and `fir.select_rank`: both are turned
/// into a single `llvm.switch` over an i32 selector.
template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  // NOTE(review): these are only assigned when a unit (default) case is
  // present; the asserts below assume the default case always exists and is
  // last — confirm against the op verifier.
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : mlir::ValueRange{});
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
  }

  // LLVM::SwitchOp takes a i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
                                                    selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
}

/// conversion of fir::SelectOp to an if-then-else ladder
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// conversion of fir::SelectRankOp to an if-then-else ladder
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
2940 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2941 using FIROpConversion::FIROpConversion; 2942 2943 mlir::LogicalResult 2944 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2945 mlir::ConversionPatternRewriter &rewriter) const override { 2946 mlir::emitError(select.getLoc(), 2947 "fir.select_type should have already been converted"); 2948 return mlir::failure(); 2949 } 2950 }; 2951 2952 /// `fir.store` --> `llvm.store` 2953 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2954 using FIROpConversion::FIROpConversion; 2955 2956 mlir::LogicalResult 2957 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2958 mlir::ConversionPatternRewriter &rewriter) const override { 2959 if (store.getValue().getType().isa<fir::BoxType>()) { 2960 // fir.box value is actually in memory, load it first before storing it. 2961 mlir::Location loc = store.getLoc(); 2962 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2963 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2964 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2965 adaptor.getOperands()[0]); 2966 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2967 store, val, adaptor.getOperands()[1]); 2968 } else { 2969 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2970 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2971 } 2972 return mlir::success(); 2973 } 2974 }; 2975 2976 namespace { 2977 2978 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2979 /// the character buffer and one for the buffer length. 
2980 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2981 using FIROpConversion::FIROpConversion; 2982 2983 mlir::LogicalResult 2984 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2985 mlir::ConversionPatternRewriter &rewriter) const override { 2986 auto *ctx = unboxchar.getContext(); 2987 2988 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2989 mlir::Value tuple = adaptor.getOperands()[0]; 2990 mlir::Type tupleTy = tuple.getType(); 2991 2992 mlir::Location loc = unboxchar.getLoc(); 2993 mlir::Value ptrToBuffer = 2994 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2995 2996 mlir::LLVM::ExtractValueOp len = 2997 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2998 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2999 3000 rewriter.replaceOp(unboxchar, 3001 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 3002 return mlir::success(); 3003 } 3004 }; 3005 3006 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 3007 /// components. 3008 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return mlir::failure();
  }
};

/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return mlir::success();
  }
};

/// Convert `fir.zero` to a zero value of the converted type: a null pointer,
/// an integer 0, or a floating-point 0.0. Aggregates are not supported yet.
struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      // Zero of a pointer type is the null pointer.
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
    } else {
      // TODO: create ConstantAggregateZero for FIR aggregate/array types.
      return rewriter.notifyMatchFailure(
          zero,
          "conversion of fir.zero with aggregate type not implemented yet");
    }
    return mlir::success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return mlir::success();
  }
};

/// `fir.is_present` -->
/// ```
///  %0 = llvm.mlir.constant(0 : i64)
///  %1 = llvm.ptrtoint %0
///  %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Location loc = isPresent.getLoc();
    auto ptr = adaptor.getOperands()[0];

    if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
      // For a boxchar, test the buffer pointer held in the first field.
      auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());

      mlir::Type ty = structTy.getBody()[0];
      mlir::MLIRContext *ctx = isPresent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
    }
    // Present iff the address is non-null.
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
    auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);

    return mlir::success();
  }
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      // An absent boxchar is a struct whose buffer pointer field is null.
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return mlir::success();
  }
};

//
// Primitive operations on Complex types
//

/// Generate inline code for complex addition/subtraction
template <typename LLVMOP, typename OPTY>
static mlir::LLVM::InsertValueOp
complexSum(OPTY sumop, mlir::ValueRange opnds,
           mlir::ConversionPatternRewriter &rewriter,
           fir::LLVMTypeConverter &lowering) {
  mlir::Value a = opnds[0];
  mlir::Value b = opnds[1];
  auto loc = sumop.getLoc();
  auto ctx = sumop.getContext();
  auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
  auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
  mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
  mlir::Type ty = lowering.convertType(sumop.getType());
  // Extract the (real, imaginary) parts of both operands.
  auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
  auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
  auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
  auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
  // Combine componentwise, then rebuild the complex struct value.
  auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
  auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
  auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
  auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
  return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
}
} // namespace

namespace {
struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) + (x' + iy')
    // result: (x + x') + i(y + y')
    auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(addc, r.getResult());
    return mlir::success();
  }
};

struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) - (x' + iy')
    // result: (x - x') + i(y - y')
    auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(subc, r.getResult());
    return mlir::success();
  }
};

/// Inlined complex multiply
struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3200 mlir::ConversionPatternRewriter &rewriter) const override { 3201 // TODO: Can we use a call to __muldc3 ? 3202 // given: (x + iy) * (x' + iy') 3203 // result: (xx'-yy')+i(xy'+yx') 3204 mlir::Value a = adaptor.getOperands()[0]; 3205 mlir::Value b = adaptor.getOperands()[1]; 3206 auto loc = mulc.getLoc(); 3207 auto *ctx = mulc.getContext(); 3208 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3209 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3210 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3211 mlir::Type ty = convertType(mulc.getType()); 3212 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3213 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3214 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3215 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3216 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3217 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3218 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3219 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3220 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3221 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3222 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3223 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3224 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3225 rewriter.replaceOp(mulc, r0.getResult()); 3226 return mlir::success(); 3227 } 3228 }; 3229 3230 /// Inlined complex division 3231 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3232 using FIROpConversion::FIROpConversion; 3233 3234 mlir::LogicalResult 3235 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3236 
mlir::ConversionPatternRewriter &rewriter) const override { 3237 // TODO: Can we use a call to __divdc3 instead? 3238 // Just generate inline code for now. 3239 // given: (x + iy) / (x' + iy') 3240 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 3241 mlir::Value a = adaptor.getOperands()[0]; 3242 mlir::Value b = adaptor.getOperands()[1]; 3243 auto loc = divc.getLoc(); 3244 auto *ctx = divc.getContext(); 3245 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3246 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3247 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 3248 mlir::Type ty = convertType(divc.getType()); 3249 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3250 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3251 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3252 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3253 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3254 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 3255 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3256 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3257 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3258 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 3259 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 3260 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 3261 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 3262 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 3263 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 3264 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3265 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3266 auto r0 = 
rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3267 rewriter.replaceOp(divc, r0.getResult()); 3268 return mlir::success(); 3269 } 3270 }; 3271 3272 /// Inlined complex negation 3273 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 3274 using FIROpConversion::FIROpConversion; 3275 3276 mlir::LogicalResult 3277 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 3278 mlir::ConversionPatternRewriter &rewriter) const override { 3279 // given: -(x + iy) 3280 // result: -x - iy 3281 auto *ctxt = neg.getContext(); 3282 auto eleTy = convertType(getComplexEleTy(neg.getType())); 3283 auto ty = convertType(neg.getType()); 3284 auto loc = neg.getLoc(); 3285 mlir::Value o0 = adaptor.getOperands()[0]; 3286 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 3287 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 3288 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 3289 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 3290 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 3291 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 3292 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 3293 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 3294 return mlir::success(); 3295 } 3296 }; 3297 3298 /// Conversion pattern for operation that must be dead. The information in these 3299 /// operations is used by other operation. At this point they should not have 3300 /// anymore uses. 3301 /// These operations are normally dead after the pre-codegen pass. 
3302 template <typename FromOp> 3303 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3304 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3305 const fir::FIRToLLVMPassOptions &options) 3306 : FIROpConversion<FromOp>(lowering, options) {} 3307 using OpAdaptor = typename FromOp::Adaptor; 3308 3309 mlir::LogicalResult 3310 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3311 mlir::ConversionPatternRewriter &rewriter) const final { 3312 if (!op->getUses().empty()) 3313 return rewriter.notifyMatchFailure(op, "op must be dead"); 3314 rewriter.eraseOp(op); 3315 return mlir::success(); 3316 } 3317 }; 3318 3319 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3320 using MustBeDeadConversion::MustBeDeadConversion; 3321 }; 3322 3323 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3324 using MustBeDeadConversion::MustBeDeadConversion; 3325 }; 3326 3327 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3328 using MustBeDeadConversion::MustBeDeadConversion; 3329 }; 3330 3331 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3332 using MustBeDeadConversion::MustBeDeadConversion; 3333 }; 3334 3335 } // namespace 3336 3337 namespace { 3338 /// Convert FIR dialect to LLVM dialect 3339 /// 3340 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3341 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  FIRToLLVMLowering() = default;
  FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    // An explicitly forced triple (pass option) overrides the module's.
    if (!forcedTargetTriple.empty())
      fir::setTargetTriple(mod, forcedTargetTriple);

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::RewritePatternSet pattern(context);
    // Register every FIR -> LLVM conversion pattern defined in this file.
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
        GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
        NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter,
                                                                  options);
    // Also lower residual func/OpenMP/arith/cf dialect ops to LLVM dialect.
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    // Convert math-like dialect operations, which can be produced
    // when late math lowering mode is used, into llvm dialect.
    // Libm fallback patterns get benefit 0 so direct LLVM lowerings win.
    mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for Operations without regions, for those
    // which contains regions it is legal if the region contains only the
    // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and
    // legalize conversion of OpenMP operations without regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // required NOPs for applying a full conversion
    target.addLegalOp<mlir::ModuleOp>();

    // apply the patterns
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  /// \p output receives the printed module; \p p does the actual printing.
  LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    // Unnamed modules get the placeholder name "FIRModule".
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    // Translation failed; report and mark the pass as failed.
    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  llvm::raw_ostream &output;
  fir::LLVMIRLoweringPrinter printer;
};

} // namespace

/// Factory: FIR-to-LLVM-dialect pass with default options.
std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

/// Factory: FIR-to-LLVM-dialect pass with caller-supplied options.
std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

/// Factory: LLVM-dialect-to-LLVM-IR pass printing via \p printer to \p output.
std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}