1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/MathToLLVM/MathToLLVM.h" 27 #include "mlir/Conversion/MathToLibm/MathToLibm.h" 28 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 29 #include "mlir/IR/BuiltinTypes.h" 30 #include "mlir/IR/Matchers.h" 31 #include "mlir/Pass/Pass.h" 32 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 33 #include "llvm/ADT/ArrayRef.h" 34 35 #define DEBUG_TYPE "flang-codegen" 36 37 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 38 #include "TypeConverter.h" 39 40 // TODO: This should really be recovered from the specified target. 41 static constexpr unsigned defaultAlign = 8; 42 43 /// `fir.box` attribute values as defined for CFI_attribute_t in 44 /// flang/ISO_Fortran_binding.h. 45 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 46 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 47 48 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 49 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 50 } 51 52 static mlir::LLVM::ConstantOp 53 genConstantIndex(mlir::Location loc, mlir::Type ity, 54 mlir::ConversionPatternRewriter &rewriter, 55 std::int64_t offset) { 56 auto cattr = rewriter.getI64IntegerAttr(offset); 57 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 58 } 59 60 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 61 mlir::Block *insertBefore) { 62 assert(insertBefore && "expected valid insertion block"); 63 return rewriter.createBlock(insertBefore->getParent(), 64 mlir::Region::iterator(insertBefore)); 65 } 66 67 namespace { 68 /// FIR conversion pattern template 69 template <typename FromOp> 70 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 71 public: 72 explicit FIROpConversion(fir::LLVMTypeConverter &lowering, 73 const fir::FIRToLLVMPassOptions &options) 74 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {} 75 76 protected: 77 mlir::Type convertType(mlir::Type ty) const { 78 return lowerTy().convertType(ty); 79 } 80 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 81 82 mlir::Type getVoidPtrType() const { 83 return mlir::LLVM::LLVMPointerType::get( 84 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 85 } 86 87 mlir::LLVM::ConstantOp 88 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 89 int value) const { 90 mlir::Type i32Ty = rewriter.getI32Type(); 91 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 92 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 93 } 94 95 mlir::LLVM::ConstantOp 96 genConstantOffset(mlir::Location loc, 97 mlir::ConversionPatternRewriter &rewriter, 98 int offset) const { 99 mlir::Type ity = lowerTy().offsetType(); 100 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset); 101 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 102 } 103 104 /// Perform an extension or truncation as needed on an integer value. Lowering 105 /// to the specific target may involve some sign-extending or truncation of 106 /// values, particularly to fit them from abstract box types to the 107 /// appropriate reified structures. 108 mlir::Value integerCast(mlir::Location loc, 109 mlir::ConversionPatternRewriter &rewriter, 110 mlir::Type ty, mlir::Value val) const { 111 auto valTy = val.getType(); 112 // If the value was not yet lowered, lower its type so that it can 113 // be used in getPrimitiveTypeSizeInBits. 114 if (!valTy.isa<mlir::IntegerType>()) 115 valTy = convertType(valTy); 116 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 117 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 118 if (toSize < fromSize) 119 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 120 if (toSize > fromSize) 121 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 122 return val; 123 } 124 125 /// Construct code sequence to extract the specifc value from a `fir.box`. 126 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 127 mlir::Type resultTy, 128 mlir::ConversionPatternRewriter &rewriter, 129 unsigned boxValue) const { 130 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 131 mlir::LLVM::ConstantOp cValuePos = 132 genConstantOffset(loc, rewriter, boxValue); 133 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 134 auto p = rewriter.create<mlir::LLVM::GEPOp>( 135 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 136 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 137 } 138 139 /// Method to construct code sequence to get the triple for dimension `dim` 140 /// from a box. 141 llvm::SmallVector<mlir::Value, 3> 142 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, 143 mlir::Value box, mlir::Value dim, 144 mlir::ConversionPatternRewriter &rewriter) const { 145 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 146 mlir::LLVM::ConstantOp cDims = 147 genConstantOffset(loc, rewriter, kDimsPosInBox); 148 mlir::LLVM::LoadOp l0 = 149 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 150 mlir::LLVM::LoadOp l1 = 151 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 152 mlir::LLVM::LoadOp l2 = 153 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 154 return {l0.getResult(), l1.getResult(), l2.getResult()}; 155 } 156 157 mlir::LLVM::LoadOp 158 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 159 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 160 mlir::Type ty, 161 mlir::ConversionPatternRewriter &rewriter) const { 162 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 163 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 164 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 165 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 166 } 167 168 mlir::Value 169 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 170 mlir::ConversionPatternRewriter &rewriter) const { 171 auto idxTy = lowerTy().indexType(); 172 auto c0 = genConstantOffset(loc, rewriter, 0); 173 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 174 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 175 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 176 rewriter); 177 } 178 179 /// Read base address from a fir.box. Returned address has type ty. 180 mlir::Value 181 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 182 mlir::ConversionPatternRewriter &rewriter) const { 183 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 184 mlir::LLVM::ConstantOp cAddr = 185 genConstantOffset(loc, rewriter, kAddrPosInBox); 186 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 187 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 188 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 189 } 190 191 mlir::Value 192 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 193 mlir::ConversionPatternRewriter &rewriter) const { 194 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 195 mlir::LLVM::ConstantOp cElemLen = 196 genConstantOffset(loc, rewriter, kElemLenPosInBox); 197 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 198 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 199 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 200 } 201 202 // Get the element type given an LLVM type that is of the form 203 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 204 static mlir::Type getBoxEleTy(mlir::Type type, 205 llvm::ArrayRef<unsigned> indexes) { 206 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 207 type = t.getElementType(); 208 for (auto i : indexes) { 209 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 210 assert(!t.isOpaque() && i < t.getBody().size()); 211 type = t.getBody()[i]; 212 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 213 type = t.getElementType(); 214 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 215 type = t.getElementType(); 216 } else { 217 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 218 "request for invalid box element type"); 219 } 220 } 221 return type; 222 } 223 224 // Return LLVM type of the base address given the LLVM type 225 // of the related descriptor (lowered fir.box type). 226 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 227 return getBoxEleTy(type, {kAddrPosInBox}); 228 } 229 230 // Load the attribute from the \p box and perform a check against \p maskValue 231 // The final comparison is implemented as `(attribute & maskValue) != 0`. 232 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 233 mlir::ConversionPatternRewriter &rewriter, 234 unsigned maskValue) const { 235 mlir::Type attrTy = rewriter.getI32Type(); 236 mlir::Value attribute = 237 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 238 mlir::LLVM::ConstantOp attrMask = 239 genConstantOffset(loc, rewriter, maskValue); 240 auto maskRes = 241 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 242 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 243 return rewriter.create<mlir::LLVM::ICmpOp>( 244 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 245 } 246 247 template <typename... ARGS> 248 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 249 mlir::ConversionPatternRewriter &rewriter, 250 mlir::Value base, ARGS... args) const { 251 llvm::SmallVector<mlir::Value> cv = {args...}; 252 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 253 } 254 255 fir::LLVMTypeConverter &lowerTy() const { 256 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 257 } 258 259 const fir::FIRToLLVMPassOptions &options; 260 }; 261 262 /// FIR conversion pattern template 263 template <typename FromOp> 264 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 265 public: 266 using FIROpConversion<FromOp>::FIROpConversion; 267 using OpAdaptor = typename FromOp::Adaptor; 268 269 mlir::LogicalResult 270 matchAndRewrite(FromOp op, OpAdaptor adaptor, 271 mlir::ConversionPatternRewriter &rewriter) const final { 272 mlir::Type ty = this->convertType(op.getType()); 273 return doRewrite(op, ty, adaptor, rewriter); 274 } 275 276 virtual mlir::LogicalResult 277 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 278 mlir::ConversionPatternRewriter &rewriter) const = 0; 279 }; 280 } // namespace 281 282 namespace { 283 /// Lower `fir.address_of` operation to `llvm.address_of` operation. 284 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 285 using FIROpConversion::FIROpConversion; 286 287 mlir::LogicalResult 288 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 289 mlir::ConversionPatternRewriter &rewriter) const override { 290 auto ty = convertType(addr.getType()); 291 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 292 addr, ty, addr.getSymbol().getRootReference().getValue()); 293 return mlir::success(); 294 } 295 }; 296 } // namespace 297 298 /// Lookup the function to compute the memory size of this parametric derived 299 /// type. The size of the object may depend on the LEN type parameters of the 300 /// derived type. 301 static mlir::LLVM::LLVMFuncOp 302 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 303 mlir::ConversionPatternRewriter &rewriter) { 304 auto module = op->getParentOfType<mlir::ModuleOp>(); 305 std::string name = recTy.getName().str() + "P.mem.size"; 306 if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name)) 307 return memSizeFunc; 308 TODO(op.getLoc(), "did not find allocation function"); 309 } 310 311 // Compute the alloc scale size (constant factors encoded in the array type). 312 // We do this for arrays without a constant interior or arrays of character with 313 // dynamic length arrays, since those are the only ones that get decayed to a 314 // pointer to the element type. 315 template <typename OP> 316 static mlir::Value 317 genAllocationScaleSize(OP op, mlir::Type ity, 318 mlir::ConversionPatternRewriter &rewriter) { 319 mlir::Location loc = op.getLoc(); 320 mlir::Type dataTy = op.getInType(); 321 mlir::Type scalarType = fir::unwrapSequenceType(dataTy); 322 auto seqTy = dataTy.dyn_cast<fir::SequenceType>(); 323 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) || 324 (seqTy && fir::characterWithDynamicLen(scalarType))) { 325 fir::SequenceType::Extent constSize = 1; 326 for (auto extent : seqTy.getShape()) 327 if (extent != fir::SequenceType::getUnknownExtent()) 328 constSize *= extent; 329 if (constSize != 1) { 330 mlir::Value constVal{ 331 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 332 return constVal; 333 } 334 } 335 return nullptr; 336 } 337 338 namespace { 339 /// convert to LLVM IR dialect `alloca` 340 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 341 using FIROpConversion::FIROpConversion; 342 343 mlir::LogicalResult 344 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 345 mlir::ConversionPatternRewriter &rewriter) const override { 346 mlir::ValueRange operands = adaptor.getOperands(); 347 auto loc = alloc.getLoc(); 348 mlir::Type ity = lowerTy().indexType(); 349 unsigned i = 0; 350 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 351 mlir::Type ty = convertType(alloc.getType()); 352 mlir::Type resultTy = ty; 353 if (alloc.hasLenParams()) { 354 unsigned end = alloc.numLenParams(); 355 llvm::SmallVector<mlir::Value> lenParams; 356 for (; i < end; ++i) 357 lenParams.push_back(operands[i]); 358 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 359 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 360 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 361 chrTy.getContext(), chrTy.getFKind()); 362 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 363 assert(end == 1); 364 size = integerCast(loc, rewriter, ity, lenParams[0]); 365 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 366 mlir::LLVM::LLVMFuncOp memSizeFn = 367 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 368 if (!memSizeFn) 369 emitError(loc, "did not find allocation function"); 370 mlir::NamedAttribute attr = rewriter.getNamedAttr( 371 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 372 auto call = rewriter.create<mlir::LLVM::CallOp>( 373 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 374 size = call.getResult(0); 375 ty = ::getVoidPtrType(alloc.getContext()); 376 } else { 377 return emitError(loc, "unexpected type ") 378 << scalarType << " with type parameters"; 379 } 380 } 381 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter)) 382 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 383 if (alloc.hasShapeOperands()) { 384 unsigned end = operands.size(); 385 for (; i < end; ++i) 386 size = rewriter.create<mlir::LLVM::MulOp>( 387 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 388 } 389 if (ty == resultTy) { 390 // Do not emit the bitcast if ty and resultTy are the same. 391 rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size, 392 alloc->getAttrs()); 393 } else { 394 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size, 395 alloc->getAttrs()); 396 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al); 397 } 398 return mlir::success(); 399 } 400 }; 401 } // namespace 402 403 /// Construct an `llvm.extractvalue` instruction. It will return value at 404 /// element \p x from \p tuple. 405 static mlir::LLVM::ExtractValueOp 406 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty, 407 mlir::ConversionPatternRewriter &rewriter, 408 mlir::MLIRContext *ctx, int x) { 409 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x)); 410 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x]; 411 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx); 412 } 413 414 namespace { 415 /// Lower `fir.box_addr` to the sequence of operations to extract the first 416 /// element of the box. 417 struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> { 418 using FIROpConversion::FIROpConversion; 419 420 mlir::LogicalResult 421 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor, 422 mlir::ConversionPatternRewriter &rewriter) const override { 423 mlir::Value a = adaptor.getOperands()[0]; 424 auto loc = boxaddr.getLoc(); 425 mlir::Type ty = convertType(boxaddr.getType()); 426 if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) { 427 rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter)); 428 } else { 429 auto c0attr = rewriter.getI32IntegerAttr(0); 430 auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr); 431 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a, 432 c0); 433 } 434 return mlir::success(); 435 } 436 }; 437 438 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the 439 /// boxchar. 440 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 441 using FIROpConversion::FIROpConversion; 442 443 mlir::LogicalResult 444 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 445 mlir::ConversionPatternRewriter &rewriter) const override { 446 mlir::Value boxChar = adaptor.getOperands()[0]; 447 mlir::Location loc = boxChar.getLoc(); 448 mlir::MLIRContext *ctx = boxChar.getContext(); 449 mlir::Type returnValTy = boxCharLen.getResult().getType(); 450 451 constexpr int boxcharLenIdx = 1; 452 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 453 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 454 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 455 rewriter.replaceOp(boxCharLen, lenAfterCast); 456 457 return mlir::success(); 458 } 459 }; 460 461 /// Lower `fir.box_dims` to a sequence of operations to extract the requested 462 /// dimension infomartion from the boxed value. 463 /// Result in a triple set of GEPs and loads. 464 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> { 465 using FIROpConversion::FIROpConversion; 466 467 mlir::LogicalResult 468 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor, 469 mlir::ConversionPatternRewriter &rewriter) const override { 470 llvm::SmallVector<mlir::Type, 3> resultTypes = { 471 convertType(boxdims.getResult(0).getType()), 472 convertType(boxdims.getResult(1).getType()), 473 convertType(boxdims.getResult(2).getType()), 474 }; 475 auto results = 476 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0], 477 adaptor.getOperands()[1], rewriter); 478 rewriter.replaceOp(boxdims, results); 479 return mlir::success(); 480 } 481 }; 482 483 /// Lower `fir.box_elesize` to a sequence of operations ro extract the size of 484 /// an element in the boxed value. 485 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> { 486 using FIROpConversion::FIROpConversion; 487 488 mlir::LogicalResult 489 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor, 490 mlir::ConversionPatternRewriter &rewriter) const override { 491 mlir::Value a = adaptor.getOperands()[0]; 492 auto loc = boxelesz.getLoc(); 493 auto ty = convertType(boxelesz.getType()); 494 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox); 495 rewriter.replaceOp(boxelesz, elemSize); 496 return mlir::success(); 497 } 498 }; 499 500 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the 501 /// boxed value was from an ALLOCATABLE entity. 502 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> { 503 using FIROpConversion::FIROpConversion; 504 505 mlir::LogicalResult 506 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor, 507 mlir::ConversionPatternRewriter &rewriter) const override { 508 mlir::Value box = adaptor.getOperands()[0]; 509 auto loc = boxisalloc.getLoc(); 510 mlir::Value check = 511 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable); 512 rewriter.replaceOp(boxisalloc, check); 513 return mlir::success(); 514 } 515 }; 516 517 /// Lower `fir.box_isarray` to a sequence of operations to determine if the 518 /// boxed is an array. 519 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> { 520 using FIROpConversion::FIROpConversion; 521 522 mlir::LogicalResult 523 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor, 524 mlir::ConversionPatternRewriter &rewriter) const override { 525 mlir::Value a = adaptor.getOperands()[0]; 526 auto loc = boxisarray.getLoc(); 527 auto rank = 528 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox); 529 auto c0 = genConstantOffset(loc, rewriter, 0); 530 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 531 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0); 532 return mlir::success(); 533 } 534 }; 535 536 /// Lower `fir.box_isptr` to a sequence of operations to determined if the 537 /// boxed value was from a POINTER entity. 538 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> { 539 using FIROpConversion::FIROpConversion; 540 541 mlir::LogicalResult 542 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor, 543 mlir::ConversionPatternRewriter &rewriter) const override { 544 mlir::Value box = adaptor.getOperands()[0]; 545 auto loc = boxisptr.getLoc(); 546 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer); 547 rewriter.replaceOp(boxisptr, check); 548 return mlir::success(); 549 } 550 }; 551 552 /// Lower `fir.box_rank` to the sequence of operation to extract the rank from 553 /// the box. 554 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> { 555 using FIROpConversion::FIROpConversion; 556 557 mlir::LogicalResult 558 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor, 559 mlir::ConversionPatternRewriter &rewriter) const override { 560 mlir::Value a = adaptor.getOperands()[0]; 561 auto loc = boxrank.getLoc(); 562 mlir::Type ty = convertType(boxrank.getType()); 563 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox); 564 rewriter.replaceOp(boxrank, result); 565 return mlir::success(); 566 } 567 }; 568 569 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 570 /// boxproc. 571 /// TODO: Part of supporting Fortran 2003 procedure pointers. 572 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 573 using FIROpConversion::FIROpConversion; 574 575 mlir::LogicalResult 576 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 577 mlir::ConversionPatternRewriter &rewriter) const override { 578 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 579 return mlir::failure(); 580 } 581 }; 582 583 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 584 /// descriptor from the box. 585 struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> { 586 using FIROpConversion::FIROpConversion; 587 588 mlir::LogicalResult 589 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor, 590 mlir::ConversionPatternRewriter &rewriter) const override { 591 mlir::Value box = adaptor.getOperands()[0]; 592 auto loc = boxtypedesc.getLoc(); 593 mlir::Type typeTy = 594 fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext()); 595 auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox); 596 auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy); 597 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy, 598 result); 599 return mlir::success(); 600 } 601 }; 602 603 /// Lower `fir.string_lit` to LLVM IR dialect operation. 604 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> { 605 using FIROpConversion::FIROpConversion; 606 607 mlir::LogicalResult 608 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor, 609 mlir::ConversionPatternRewriter &rewriter) const override { 610 auto ty = convertType(constop.getType()); 611 auto attr = constop.getValue(); 612 if (attr.isa<mlir::StringAttr>()) { 613 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr); 614 return mlir::success(); 615 } 616 617 auto charTy = constop.getType().cast<fir::CharacterType>(); 618 unsigned bits = lowerTy().characterBitsize(charTy); 619 mlir::Type intTy = rewriter.getIntegerType(bits); 620 mlir::Location loc = constop.getLoc(); 621 mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 622 if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) { 623 cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr); 624 } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) { 625 for (auto a : llvm::enumerate(arr.getValue())) { 626 // convert each character to a precise bitsize 627 auto elemAttr = mlir::IntegerAttr::get( 628 intTy, 629 a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits)); 630 auto elemCst = 631 rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr); 632 auto index = mlir::ArrayAttr::get( 633 constop.getContext(), rewriter.getI32IntegerAttr(a.index())); 634 cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst, 635 index); 636 } 637 } else { 638 return mlir::failure(); 639 } 640 rewriter.replaceOp(constop, cst); 641 return mlir::success(); 642 } 643 }; 644 645 /// `fir.call` -> `llvm.call` 646 struct CallOpConversion : public FIROpConversion<fir::CallOp> { 647 using FIROpConversion::FIROpConversion; 648 649 mlir::LogicalResult 650 matchAndRewrite(fir::CallOp call, OpAdaptor adaptor, 651 mlir::ConversionPatternRewriter &rewriter) const override { 652 llvm::SmallVector<mlir::Type> resultTys; 653 for (auto r : call.getResults()) 654 resultTys.push_back(convertType(r.getType())); 655 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 656 call, resultTys, adaptor.getOperands(), call->getAttrs()); 657 return mlir::success(); 658 } 659 }; 660 } // namespace 661 662 static mlir::Type getComplexEleTy(mlir::Type complex) { 663 if (auto cc = complex.dyn_cast<mlir::ComplexType>()) 664 return cc.getElementType(); 665 return complex.cast<fir::ComplexType>().getElementType(); 666 } 667 668 namespace { 669 /// Compare complex values 670 /// 671 /// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une). 672 /// 673 /// For completeness, all other comparison are done on the real component only. 674 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 675 using FIROpConversion::FIROpConversion; 676 677 mlir::LogicalResult 678 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 679 mlir::ConversionPatternRewriter &rewriter) const override { 680 mlir::ValueRange operands = adaptor.getOperands(); 681 mlir::MLIRContext *ctxt = cmp.getContext(); 682 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 683 mlir::Type resTy = convertType(cmp.getType()); 684 mlir::Location loc = cmp.getLoc(); 685 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 686 llvm::SmallVector<mlir::Value, 2> rp = { 687 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 688 pos0), 689 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 690 pos0)}; 691 auto rcp = 692 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 693 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 694 llvm::SmallVector<mlir::Value, 2> ip = { 695 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 696 pos1), 697 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 698 pos1)}; 699 auto icp = 700 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 701 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp}; 702 switch (cmp.getPredicate()) { 703 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 704 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 705 break; 706 case mlir::arith::CmpFPredicate::UNE: // .NE. 707 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 708 break; 709 default: 710 rewriter.replaceOp(cmp, rcp.getResult()); 711 break; 712 } 713 return mlir::success(); 714 } 715 }; 716 717 /// Lower complex constants 718 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 719 using FIROpConversion::FIROpConversion; 720 721 mlir::LogicalResult 722 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 723 mlir::ConversionPatternRewriter &rewriter) const override { 724 mlir::Location loc = conc.getLoc(); 725 mlir::MLIRContext *ctx = conc.getContext(); 726 mlir::Type ty = convertType(conc.getType()); 727 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 728 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 729 auto realPart = 730 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 731 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 732 auto imPart = 733 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 734 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 735 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 736 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 737 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 738 loc, ty, undef, realPart, realIndex); 739 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 740 imPart, imIndex); 741 return mlir::success(); 742 } 743 744 inline llvm::APFloat getValue(mlir::Attribute attr) const { 745 return attr.cast<fir::RealAttr>().getValue(); 746 } 747 }; 748 749 /// convert value of from-type to value of to-type 750 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 751 using FIROpConversion::FIROpConversion; 752 753 static bool isFloatingPointTy(mlir::Type ty) { 754 return ty.isa<mlir::FloatType>(); 755 } 756 757 mlir::LogicalResult 758 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 759 mlir::ConversionPatternRewriter &rewriter) const override { 760 auto fromFirTy = convert.getValue().getType(); 761 auto toFirTy = convert.getRes().getType(); 762 auto fromTy = convertType(fromFirTy); 763 auto toTy = convertType(toFirTy); 764 mlir::Value op0 = adaptor.getOperands()[0]; 765 if (fromTy == toTy) { 766 rewriter.replaceOp(convert, op0); 767 return mlir::success(); 768 } 769 auto loc = convert.getLoc(); 770 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 771 unsigned toBits, mlir::Type toTy) -> mlir::Value { 772 if (fromBits == toBits) { 773 // TODO: Converting between two floating-point representations with the 774 // same bitwidth is not allowed for now. 775 mlir::emitError(loc, 776 "cannot implicitly convert between two floating-point " 777 "representations of the same bitwidth"); 778 return {}; 779 } 780 if (fromBits > toBits) 781 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 782 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 783 }; 784 // Complex to complex conversion. 785 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 786 // Special case: handle the conversion of a complex such that both the 787 // real and imaginary parts are converted together. 788 auto zero = mlir::ArrayAttr::get(convert.getContext(), 789 rewriter.getI32IntegerAttr(0)); 790 auto one = mlir::ArrayAttr::get(convert.getContext(), 791 rewriter.getI32IntegerAttr(1)); 792 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 793 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 794 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 795 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 796 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 797 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 798 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 799 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 800 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 801 auto i1 = 802 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 803 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 804 ic, one); 805 return mlir::success(); 806 } 807 808 // Follow UNIX F77 convention for logicals: 809 // 1. underlying integer is not zero => logical is .TRUE. 810 // 2. logical is .TRUE. => set underlying integer to 1. 811 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 812 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 813 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 814 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 815 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 816 return mlir::success(); 817 } 818 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 819 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 820 return mlir::success(); 821 } 822 823 // Floating point to floating point conversion. 824 if (isFloatingPointTy(fromTy)) { 825 if (isFloatingPointTy(toTy)) { 826 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 827 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 828 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 829 rewriter.replaceOp(convert, v); 830 return mlir::success(); 831 } 832 if (toTy.isa<mlir::IntegerType>()) { 833 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 834 return mlir::success(); 835 } 836 } else if (fromTy.isa<mlir::IntegerType>()) { 837 // Integer to integer conversion. 838 if (toTy.isa<mlir::IntegerType>()) { 839 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 840 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 841 assert(fromBits != toBits); 842 if (fromBits > toBits) { 843 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 844 return mlir::success(); 845 } 846 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 847 return mlir::success(); 848 } 849 // Integer to floating point conversion. 850 if (isFloatingPointTy(toTy)) { 851 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 852 return mlir::success(); 853 } 854 // Integer to pointer conversion. 855 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 856 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 857 return mlir::success(); 858 } 859 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 860 // Pointer to integer conversion. 861 if (toTy.isa<mlir::IntegerType>()) { 862 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 863 return mlir::success(); 864 } 865 // Pointer to pointer conversion. 866 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 867 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 868 return mlir::success(); 869 } 870 } 871 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 872 } 873 }; 874 875 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 876 /// table. 877 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 878 using FIROpConversion::FIROpConversion; 879 880 mlir::LogicalResult 881 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 882 mlir::ConversionPatternRewriter &rewriter) const override { 883 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 884 return mlir::failure(); 885 } 886 }; 887 888 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 889 /// derived type. 890 struct DispatchTableOpConversion 891 : public FIROpConversion<fir::DispatchTableOp> { 892 using FIROpConversion::FIROpConversion; 893 894 mlir::LogicalResult 895 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 896 mlir::ConversionPatternRewriter &rewriter) const override { 897 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 898 return mlir::failure(); 899 } 900 }; 901 902 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 903 /// method-name to a function. 904 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 905 using FIROpConversion::FIROpConversion; 906 907 mlir::LogicalResult 908 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 909 mlir::ConversionPatternRewriter &rewriter) const override { 910 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 911 return mlir::failure(); 912 } 913 }; 914 915 /// Lower `fir.global_len` operation. 916 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 917 using FIROpConversion::FIROpConversion; 918 919 mlir::LogicalResult 920 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 921 mlir::ConversionPatternRewriter &rewriter) const override { 922 TODO(globalLen.getLoc(), "fir.global_len codegen"); 923 return mlir::failure(); 924 } 925 }; 926 927 /// Lower fir.len_param_index 928 struct LenParamIndexOpConversion 929 : public FIROpConversion<fir::LenParamIndexOp> { 930 using FIROpConversion::FIROpConversion; 931 932 // FIXME: this should be specialized by the runtime target 933 mlir::LogicalResult 934 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 935 mlir::ConversionPatternRewriter &rewriter) const override { 936 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 937 } 938 }; 939 940 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 941 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 942 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 943 /// element is the length of the character buffer (`#n`). 944 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 945 using FIROpConversion::FIROpConversion; 946 947 mlir::LogicalResult 948 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 949 mlir::ConversionPatternRewriter &rewriter) const override { 950 mlir::ValueRange operands = adaptor.getOperands(); 951 auto *ctx = emboxChar.getContext(); 952 953 mlir::Value charBuffer = operands[0]; 954 mlir::Value charBufferLen = operands[1]; 955 956 mlir::Location loc = emboxChar.getLoc(); 957 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 958 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 959 960 mlir::Type lenTy = 961 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 962 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 963 964 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 965 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 966 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 967 loc, llvmStructTy, llvmStruct, charBuffer, c0); 968 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 969 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 970 971 return mlir::success(); 972 } 973 }; 974 } // namespace 975 976 /// Return the LLVMFuncOp corresponding to the standard malloc call. 977 static mlir::LLVM::LLVMFuncOp 978 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 979 auto module = op->getParentOfType<mlir::ModuleOp>(); 980 if (mlir::LLVM::LLVMFuncOp mallocFunc = 981 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 982 return mallocFunc; 983 mlir::OpBuilder moduleBuilder( 984 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 985 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 986 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 987 rewriter.getUnknownLoc(), "malloc", 988 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 989 indexType, 990 /*isVarArg=*/false)); 991 } 992 993 /// Helper function for generating the LLVM IR that computes the size 994 /// in bytes for a derived type. 995 static mlir::Value 996 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 997 mlir::ConversionPatternRewriter &rewriter) { 998 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 999 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1000 llvm::SmallVector<mlir::Value> args = {one}; 1001 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 1002 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 1003 } 1004 1005 namespace { 1006 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1007 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1008 using FIROpConversion::FIROpConversion; 1009 1010 mlir::LogicalResult 1011 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1012 mlir::ConversionPatternRewriter &rewriter) const override { 1013 mlir::Type heapTy = heap.getType(); 1014 mlir::Type ty = convertType(heapTy); 1015 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1016 mlir::Location loc = heap.getLoc(); 1017 auto ity = lowerTy().indexType(); 1018 mlir::Type dataTy = fir::unwrapRefType(heapTy); 1019 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1020 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1021 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1022 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1023 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1024 for (mlir::Value opnd : adaptor.getOperands()) 1025 size = rewriter.create<mlir::LLVM::MulOp>( 1026 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1027 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1028 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1029 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1030 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1031 malloc.getResult(0)); 1032 return mlir::success(); 1033 } 1034 1035 // Compute the (allocation) size of the allocmem type in bytes. 1036 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1037 mlir::ConversionPatternRewriter &rewriter, 1038 mlir::Type llTy) const { 1039 // Use the primitive size, if available. 1040 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1041 if (auto size = 1042 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1043 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1044 1045 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1046 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1047 } 1048 }; 1049 } // namespace 1050 1051 /// Return the LLVMFuncOp corresponding to the standard free call. 1052 static mlir::LLVM::LLVMFuncOp 1053 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1054 auto module = op->getParentOfType<mlir::ModuleOp>(); 1055 if (mlir::LLVM::LLVMFuncOp freeFunc = 1056 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1057 return freeFunc; 1058 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1059 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1060 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1061 rewriter.getUnknownLoc(), "free", 1062 mlir::LLVM::LLVMFunctionType::get(voidType, 1063 getVoidPtrType(op.getContext()), 1064 /*isVarArg=*/false)); 1065 } 1066 1067 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1068 unsigned result = 1; 1069 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1070 eleTy; 1071 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1072 ++result; 1073 return result; 1074 } 1075 1076 namespace { 1077 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1078 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1079 using FIROpConversion::FIROpConversion; 1080 1081 mlir::LogicalResult 1082 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1083 mlir::ConversionPatternRewriter &rewriter) const override { 1084 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1085 mlir::Location loc = freemem.getLoc(); 1086 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1087 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1088 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1089 rewriter.create<mlir::LLVM::CallOp>( 1090 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1091 rewriter.eraseOp(freemem); 1092 return mlir::success(); 1093 } 1094 }; 1095 } // namespace 1096 1097 /// Common base class for embox to descriptor conversion. 1098 template <typename OP> 1099 struct EmboxCommonConversion : public FIROpConversion<OP> { 1100 using FIROpConversion<OP>::FIROpConversion; 1101 1102 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1103 // The order to find the LLVMFuncOp is as follows: 1104 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1105 // 2. The first ancestor that is a LLVMFuncOp. 1106 mlir::LLVM::LLVMFuncOp 1107 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1108 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1109 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1110 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1111 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1112 } 1113 1114 // Generate an alloca of size 1 and type \p toTy. 1115 mlir::LLVM::AllocaOp 1116 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1117 mlir::ConversionPatternRewriter &rewriter) const { 1118 auto thisPt = rewriter.saveInsertionPoint(); 1119 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1120 rewriter.setInsertionPointToStart(&func.front()); 1121 auto size = this->genI32Constant(loc, rewriter, 1); 1122 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1123 rewriter.restoreInsertionPoint(thisPt); 1124 return al; 1125 } 1126 1127 static int getCFIAttr(fir::BoxType boxTy) { 1128 auto eleTy = boxTy.getEleTy(); 1129 if (eleTy.isa<fir::PointerType>()) 1130 return CFI_attribute_pointer; 1131 if (eleTy.isa<fir::HeapType>()) 1132 return CFI_attribute_allocatable; 1133 return CFI_attribute_other; 1134 } 1135 1136 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1137 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1138 .template dyn_cast<fir::RecordType>(); 1139 } 1140 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1141 auto recTy = unwrapIfDerived(boxTy); 1142 return recTy && recTy.getNumLenParams() > 0; 1143 } 1144 static bool isDerivedType(fir::BoxType boxTy) { 1145 return static_cast<bool>(unwrapIfDerived(boxTy)); 1146 } 1147 1148 // Get the element size and CFI type code of the boxed value. 1149 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1150 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1151 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1152 auto doInteger = 1153 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1154 int typeCode = fir::integerBitsToTypeCode(width); 1155 return {this->genConstantOffset(loc, rewriter, width / 8), 1156 this->genConstantOffset(loc, rewriter, typeCode)}; 1157 }; 1158 auto doLogical = 1159 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1160 int typeCode = fir::logicalBitsToTypeCode(width); 1161 return {this->genConstantOffset(loc, rewriter, width / 8), 1162 this->genConstantOffset(loc, rewriter, typeCode)}; 1163 }; 1164 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1165 int typeCode = fir::realBitsToTypeCode(width); 1166 return {this->genConstantOffset(loc, rewriter, width / 8), 1167 this->genConstantOffset(loc, rewriter, typeCode)}; 1168 }; 1169 auto doComplex = 1170 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1171 auto typeCode = fir::complexBitsToTypeCode(width); 1172 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1173 this->genConstantOffset(loc, rewriter, typeCode)}; 1174 }; 1175 auto doCharacter = 1176 [&](unsigned width, 1177 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1178 auto typeCode = fir::characterBitsToTypeCode(width); 1179 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1180 if (width == 8) 1181 return {len, typeCodeVal}; 1182 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1183 auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8); 1184 auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len); 1185 auto size = 1186 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64); 1187 return {size, typeCodeVal}; 1188 }; 1189 auto getKindMap = [&]() -> fir::KindMapping & { 1190 return this->lowerTy().getKindMap(); 1191 }; 1192 // Pointer-like types. 1193 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1194 boxEleTy = eleTy; 1195 // Integer types. 1196 if (fir::isa_integer(boxEleTy)) { 1197 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1198 return doInteger(ty.getWidth()); 1199 auto ty = boxEleTy.cast<fir::IntegerType>(); 1200 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1201 } 1202 // Floating point types. 1203 if (fir::isa_real(boxEleTy)) { 1204 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1205 return doFloat(ty.getWidth()); 1206 auto ty = boxEleTy.cast<fir::RealType>(); 1207 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1208 } 1209 // Complex types. 1210 if (fir::isa_complex(boxEleTy)) { 1211 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1212 return doComplex( 1213 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1214 auto ty = boxEleTy.cast<fir::ComplexType>(); 1215 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1216 } 1217 // Character types. 1218 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1219 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1220 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1221 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1222 return doCharacter(charWidth, len); 1223 } 1224 assert(!lenParams.empty()); 1225 return doCharacter(charWidth, lenParams.back()); 1226 } 1227 // Logical type. 1228 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1229 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1230 // Array types. 1231 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1232 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1233 // Derived-type types. 1234 if (boxEleTy.isa<fir::RecordType>()) { 1235 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1236 this->lowerTy().convertType(boxEleTy)); 1237 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1238 auto one = 1239 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1240 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1241 mlir::ValueRange{one}); 1242 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1243 loc, this->lowerTy().indexType(), gep); 1244 return {eleSize, 1245 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1246 } 1247 // Reference type. 1248 if (fir::isa_ref_type(boxEleTy)) { 1249 // FIXME: use the target pointer size rather than sizeof(void*) 1250 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1251 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1252 } 1253 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1254 } 1255 1256 /// Basic pattern to write a field in the descriptor 1257 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1258 mlir::Location loc, mlir::Value dest, 1259 llvm::ArrayRef<unsigned> fldIndexes, 1260 mlir::Value value, bool bitcast = false) const { 1261 auto boxTy = dest.getType(); 1262 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1263 if (bitcast) 1264 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1265 else 1266 value = this->integerCast(loc, rewriter, fldTy, value); 1267 llvm::SmallVector<mlir::Attribute, 2> attrs; 1268 for (auto i : fldIndexes) 1269 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1270 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1271 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1272 indexesAttr); 1273 } 1274 1275 inline mlir::Value 1276 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1277 mlir::Location loc, mlir::Value dest, 1278 mlir::Value base) const { 1279 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1280 /*bitCast=*/true); 1281 } 1282 1283 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1284 mlir::Location loc, mlir::Value dest, 1285 unsigned dim, mlir::Value lb) const { 1286 return insertField(rewriter, loc, dest, 1287 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1288 } 1289 1290 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1291 mlir::Location loc, mlir::Value dest, 1292 unsigned dim, mlir::Value extent) const { 1293 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1294 extent); 1295 } 1296 1297 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1298 mlir::Location loc, mlir::Value dest, 1299 unsigned dim, mlir::Value stride) const { 1300 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1301 stride); 1302 } 1303 1304 /// Get the address of the type descriptor global variable that was created by 1305 /// lowering for derived type \p recType. 1306 template <typename BOX> 1307 mlir::Value 1308 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1309 mlir::Location loc, fir::RecordType recType) const { 1310 std::string name = 1311 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1312 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1313 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1314 auto ty = mlir::LLVM::LLVMPointerType::get( 1315 this->lowerTy().convertType(global.getType())); 1316 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1317 global.getSymName()); 1318 } 1319 if (auto global = 1320 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1321 // The global may have already been translated to LLVM. 1322 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1323 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1324 global.getSymName()); 1325 } 1326 // Type info derived types do not have type descriptors since they are the 1327 // types defining type descriptors. 1328 if (!this->options.ignoreMissingTypeDescriptors && 1329 !fir::NameUniquer::belongsToModule( 1330 name, Fortran::semantics::typeInfoBuiltinModule)) 1331 fir::emitFatalError( 1332 loc, "runtime derived type info descriptor was not generated"); 1333 return rewriter.create<mlir::LLVM::NullOp>( 1334 loc, ::getVoidPtrType(box.getContext())); 1335 } 1336 1337 template <typename BOX> 1338 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1339 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1340 unsigned rank, mlir::ValueRange lenParams) const { 1341 auto loc = box.getLoc(); 1342 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1343 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1344 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1345 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1346 mlir::Value descriptor = 1347 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1348 1349 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1350 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1351 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1352 typeparams.push_back(box.substr()[1]); 1353 } 1354 1355 // Write each of the fields with the appropriate values 1356 auto [eleSize, cfiTy] = 1357 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1358 descriptor = 1359 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1360 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1361 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1362 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1363 this->genI32Constant(loc, rewriter, rank)); 1364 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1365 descriptor = 1366 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1367 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1368 const bool hasAddendum = isDerivedType(boxTy); 1369 descriptor = 1370 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1371 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1372 1373 if (hasAddendum) { 1374 auto isArray = 1375 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1376 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1377 auto typeDesc = 1378 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1379 descriptor = 1380 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1381 /*bitCast=*/true); 1382 } 1383 1384 return {boxTy, descriptor, eleSize}; 1385 } 1386 1387 /// Compute the base address of a substring given the base address of a scalar 1388 /// string and the zero based string lower bound. 1389 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1390 mlir::Location loc, mlir::Value base, 1391 mlir::Value lowerBound) const { 1392 llvm::SmallVector<mlir::Value> gepOperands; 1393 auto baseType = 1394 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1395 if (auto arrayType = baseType.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1396 // FIXME: The baseType should be the array element type here, meaning 1397 // there should at most be one dimension (constant length characters are 1398 // lowered to LLVM as an array of length one characters.). However, using 1399 // the character type in the GEP does not lead to correct GEPs when llvm 1400 // opaque pointers are enabled. 1401 auto idxTy = this->lowerTy().indexType(); 1402 gepOperands.append(getDimension(arrayType), 1403 genConstantIndex(loc, idxTy, rewriter, 0)); 1404 gepOperands.push_back(lowerBound); 1405 } else { 1406 gepOperands.push_back(lowerBound); 1407 } 1408 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1409 } 1410 1411 /// If the embox is not in a globalOp body, allocate storage for the box; 1412 /// store the value inside and return the generated alloca. Return the input 1413 /// value otherwise. 1414 mlir::Value 1415 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1416 mlir::Location loc, mlir::Value boxValue) const { 1417 auto *thisBlock = rewriter.getInsertionBlock(); 1418 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1419 return boxValue; 1420 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1421 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1422 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1423 return alloca; 1424 } 1425 }; 1426 1427 /// Compute the extent of a triplet slice (lb:ub:step). 1428 static mlir::Value 1429 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1430 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1431 mlir::Value step, mlir::Value zero, mlir::Type type) { 1432 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1433 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1434 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1435 // If the resulting extent is negative (`ub-lb` and `step` have different 1436 // signs), zero must be returned instead. 1437 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1438 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1439 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1440 } 1441 1442 /// Create a generic box on a memory reference. This conversions lowers the 1443 /// abstract box to the appropriate, initialized descriptor. 1444 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1445 using EmboxCommonConversion::EmboxCommonConversion; 1446 1447 mlir::LogicalResult 1448 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1449 mlir::ConversionPatternRewriter &rewriter) const override { 1450 mlir::ValueRange operands = adaptor.getOperands(); 1451 assert(!embox.getShape() && "There should be no dims on this embox op"); 1452 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1453 embox, rewriter, /*rank=*/0, /*lenParams=*/operands.drop_front(1)); 1454 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, operands[0]); 1455 if (isDerivedTypeWithLenParams(boxTy)) { 1456 TODO(embox.getLoc(), 1457 "fir.embox codegen of derived with length parameters"); 1458 return mlir::failure(); 1459 } 1460 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1461 rewriter.replaceOp(embox, result); 1462 return mlir::success(); 1463 } 1464 }; 1465 1466 /// Create a generic box on a memory reference. 1467 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1468 using EmboxCommonConversion::EmboxCommonConversion; 1469 1470 mlir::LogicalResult 1471 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1472 mlir::ConversionPatternRewriter &rewriter) const override { 1473 mlir::ValueRange operands = adaptor.getOperands(); 1474 auto [boxTy, dest, eleSize] = 1475 consDescriptorPrefix(xbox, rewriter, xbox.getOutRank(), 1476 operands.drop_front(xbox.lenParamOffset())); 1477 // Generate the triples in the dims field of the descriptor 1478 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1479 mlir::Value base = operands[0]; 1480 assert(!xbox.shape().empty() && "must have a shape"); 1481 unsigned shapeOffset = xbox.shapeOffset(); 1482 bool hasShift = !xbox.shift().empty(); 1483 unsigned shiftOffset = xbox.shiftOffset(); 1484 bool hasSlice = !xbox.slice().empty(); 1485 unsigned sliceOffset = xbox.sliceOffset(); 1486 mlir::Location loc = xbox.getLoc(); 1487 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1488 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1489 mlir::Value prevPtrOff = one; 1490 mlir::Type eleTy = boxTy.getEleTy(); 1491 const unsigned rank = xbox.getRank(); 1492 llvm::SmallVector<mlir::Value> gepArgs; 1493 unsigned constRows = 0; 1494 mlir::Value ptrOffset = zero; 1495 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1496 assert(memEleTy.isa<fir::SequenceType>()); 1497 auto seqTy = memEleTy.cast<fir::SequenceType>(); 1498 mlir::Type seqEleTy = seqTy.getEleTy(); 1499 // Adjust the element scaling factor if the element is a dependent type. 1500 if (fir::hasDynamicSize(seqEleTy)) { 1501 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) { 1502 assert(xbox.lenParams().size() == 1); 1503 mlir::LLVM::ConstantOp charSize = genConstantIndex( 1504 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); 1505 mlir::Value castedLen = 1506 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); 1507 auto byteOffset = 1508 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen); 1509 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); 1510 } else if (seqEleTy.isa<fir::RecordType>()) { 1511 // prevPtrOff = ; 1512 TODO(loc, "generate call to calculate size of PDT"); 1513 } else { 1514 fir::emitFatalError(loc, "unexpected dynamic type"); 1515 } 1516 } else { 1517 constRows = seqTy.getConstantRows(); 1518 } 1519 1520 const auto hasSubcomp = !xbox.subcomponent().empty(); 1521 const bool hasSubstr = !xbox.substr().empty(); 1522 /// Compute initial element stride that will be use to compute the step in 1523 /// each dimension. 1524 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize); 1525 if (hasSubcomp) { 1526 // We have a subcomponent. The step value needs to be the number of 1527 // bytes per element (which is a derived type). 1528 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1529 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1530 } else if (hasSubstr) { 1531 // We have a substring. The step value needs to be the number of bytes 1532 // per CHARACTER element. 1533 auto charTy = seqEleTy.cast<fir::CharacterType>(); 1534 if (fir::hasDynamicSize(charTy)) { 1535 prevDimByteStride = prevPtrOff; 1536 } else { 1537 prevDimByteStride = genConstantIndex( 1538 loc, i64Ty, rewriter, 1539 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8); 1540 } 1541 } 1542 1543 // Process the array subspace arguments (shape, shift, etc.), if any, 1544 // translating everything to values in the descriptor wherever the entity 1545 // has a dynamic array dimension. 1546 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1547 mlir::Value extent = operands[shapeOffset]; 1548 mlir::Value outerExtent = extent; 1549 bool skipNext = false; 1550 if (hasSlice) { 1551 mlir::Value off = operands[sliceOffset]; 1552 mlir::Value adj = one; 1553 if (hasShift) 1554 adj = operands[shiftOffset]; 1555 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1556 if (constRows > 0) { 1557 gepArgs.push_back(ao); 1558 } else { 1559 auto dimOff = 1560 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1561 ptrOffset = 1562 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1563 } 1564 if (mlir::isa_and_nonnull<fir::UndefOp>( 1565 xbox.slice()[3 * di + 1].getDefiningOp())) { 1566 // This dimension contains a scalar expression in the array slice op. 1567 // The dimension is loop invariant, will be dropped, and will not 1568 // appear in the descriptor. 1569 skipNext = true; 1570 } 1571 } 1572 if (!skipNext) { 1573 // store extent 1574 if (hasSlice) 1575 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1576 operands[sliceOffset + 1], 1577 operands[sliceOffset + 2], zero, i64Ty); 1578 // Lower bound is normalized to 0 for BIND(C) interoperability. 1579 mlir::Value lb = zero; 1580 const bool isaPointerOrAllocatable = 1581 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1582 // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and 1583 // denormalized descriptors. 1584 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) 1585 lb = one; 1586 // If there is a shifted origin, and no fir.slice, and this is not 1587 // a normalized descriptor then use the value from the shift op as 1588 // the lower bound. 1589 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1590 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1591 lb = operands[shiftOffset]; 1592 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1593 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1594 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1595 lb); 1596 } 1597 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1598 1599 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1600 1601 // store step (scaled by shaped extent) 1602 mlir::Value step = prevDimByteStride; 1603 if (hasSlice) 1604 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1605 operands[sliceOffset + 2]); 1606 dest = insertStride(rewriter, loc, dest, descIdx, step); 1607 ++descIdx; 1608 } 1609 1610 // compute the stride and offset for the next natural dimension 1611 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1612 loc, i64Ty, prevDimByteStride, outerExtent); 1613 if (constRows == 0) 1614 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1615 outerExtent); 1616 else 1617 --constRows; 1618 1619 // increment iterators 1620 ++shapeOffset; 1621 if (hasShift) 1622 ++shiftOffset; 1623 if (hasSlice) 1624 sliceOffset += 3; 1625 } 1626 if (hasSlice || hasSubcomp || hasSubstr) { 1627 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1628 args.append(gepArgs.rbegin(), gepArgs.rend()); 1629 if (hasSubcomp) { 1630 // For each field in the path add the offset to base via the args list. 1631 // In the most general case, some offsets must be computed since 1632 // they are not be known until runtime. 1633 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1634 fir::unwrapPassByRefType(xbox.memref().getType())))) 1635 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1636 args.append(operands.begin() + xbox.subcomponentOffset(), 1637 operands.begin() + xbox.subcomponentOffset() + 1638 xbox.subcomponent().size()); 1639 } 1640 base = 1641 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1642 if (hasSubstr) 1643 base = shiftSubstringBase(rewriter, loc, base, 1644 operands[xbox.substrOffset()]); 1645 } 1646 dest = insertBaseAddress(rewriter, loc, dest, base); 1647 if (isDerivedTypeWithLenParams(boxTy)) 1648 TODO(loc, "fir.embox codegen of derived with length parameters"); 1649 1650 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1651 rewriter.replaceOp(xbox, result); 1652 return mlir::success(); 1653 } 1654 1655 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1656 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1657 /// zero origin lower bound for interoperability with BIND(C). 1658 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1659 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1660 } 1661 }; 1662 1663 /// Create a new box given a box reference. 1664 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1665 using EmboxCommonConversion::EmboxCommonConversion; 1666 1667 mlir::LogicalResult 1668 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1669 mlir::ConversionPatternRewriter &rewriter) const override { 1670 mlir::Location loc = rebox.getLoc(); 1671 mlir::Type idxTy = lowerTy().indexType(); 1672 mlir::Value loweredBox = adaptor.getOperands()[0]; 1673 mlir::ValueRange operands = adaptor.getOperands(); 1674 1675 // Create new descriptor and fill its non-shape related data. 1676 llvm::SmallVector<mlir::Value, 2> lenParams; 1677 mlir::Type inputEleTy = getInputEleTy(rebox); 1678 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1679 mlir::Value len = 1680 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1681 if (charTy.getFKind() != 1) { 1682 mlir::Value width = 1683 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1684 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1685 } 1686 lenParams.emplace_back(len); 1687 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1688 if (recTy.getNumLenParams() != 0) 1689 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1690 } 1691 auto [boxTy, dest, eleSize] = 1692 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1693 1694 // Read input extents, strides, and base address 1695 llvm::SmallVector<mlir::Value> inputExtents; 1696 llvm::SmallVector<mlir::Value> inputStrides; 1697 const unsigned inputRank = rebox.getRank(); 1698 for (unsigned i = 0; i < inputRank; ++i) { 1699 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1700 llvm::SmallVector<mlir::Value, 3> dimInfo = 1701 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1702 inputExtents.emplace_back(dimInfo[1]); 1703 inputStrides.emplace_back(dimInfo[2]); 1704 } 1705 1706 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1707 mlir::Value baseAddr = 1708 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1709 1710 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1711 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1712 operands, rewriter); 1713 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1714 operands, rewriter); 1715 } 1716 1717 private: 1718 /// Write resulting shape and base address in descriptor, and replace rebox 1719 /// op. 1720 mlir::LogicalResult 1721 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1722 mlir::ValueRange lbounds, mlir::ValueRange extents, 1723 mlir::ValueRange strides, 1724 mlir::ConversionPatternRewriter &rewriter) const { 1725 mlir::Location loc = rebox.getLoc(); 1726 mlir::Value zero = 1727 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1728 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1729 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1730 mlir::Value extent = std::get<0>(iter.value()); 1731 unsigned dim = iter.index(); 1732 mlir::Value lb = one; 1733 if (!lbounds.empty()) { 1734 lb = lbounds[dim]; 1735 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1736 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1737 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1738 }; 1739 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1740 dest = insertExtent(rewriter, loc, dest, dim, extent); 1741 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1742 } 1743 dest = insertBaseAddress(rewriter, loc, dest, base); 1744 mlir::Value result = 1745 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1746 rewriter.replaceOp(rebox, result); 1747 return mlir::success(); 1748 } 1749 1750 // Apply slice given the base address, extents and strides of the input box. 1751 mlir::LogicalResult 1752 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1753 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1754 mlir::ValueRange operands, 1755 mlir::ConversionPatternRewriter &rewriter) const { 1756 mlir::Location loc = rebox.getLoc(); 1757 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1758 mlir::Type idxTy = lowerTy().indexType(); 1759 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1760 // Apply subcomponent and substring shift on base address. 1761 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1762 // Cast to inputEleTy* so that a GEP can be used. 1763 mlir::Type inputEleTy = getInputEleTy(rebox); 1764 auto llvmElePtrTy = 1765 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1766 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1767 1768 if (!rebox.subcomponent().empty()) { 1769 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1770 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1771 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1772 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1773 } 1774 if (!rebox.substr().empty()) 1775 base = shiftSubstringBase(rewriter, loc, base, 1776 operands[rebox.substrOffset()]); 1777 } 1778 1779 if (rebox.slice().empty()) 1780 // The array section is of the form array[%component][substring], keep 1781 // the input array extents and strides. 1782 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1783 inputExtents, inputStrides, rewriter); 1784 1785 // Strides from the fir.box are in bytes. 1786 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1787 1788 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1789 // and strides. 1790 llvm::SmallVector<mlir::Value> slicedExtents; 1791 llvm::SmallVector<mlir::Value> slicedStrides; 1792 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1793 const bool sliceHasOrigins = !rebox.shift().empty(); 1794 unsigned sliceOps = rebox.sliceOffset(); 1795 unsigned shiftOps = rebox.shiftOffset(); 1796 auto strideOps = inputStrides.begin(); 1797 const unsigned inputRank = inputStrides.size(); 1798 for (unsigned i = 0; i < inputRank; 1799 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1800 mlir::Value sliceLb = 1801 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1802 mlir::Value inputStride = *strideOps; // already idxTy 1803 // Apply origin shift: base += (lb-shift)*input_stride 1804 mlir::Value sliceOrigin = 1805 sliceHasOrigins 1806 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1807 : one; 1808 mlir::Value diff = 1809 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1810 mlir::Value offset = 1811 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1812 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1813 // Apply upper bound and step if this is a triplet. Otherwise, the 1814 // dimension is dropped and no extents/strides are computed. 1815 mlir::Value upper = operands[sliceOps + 1]; 1816 const bool isTripletSlice = 1817 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1818 if (isTripletSlice) { 1819 mlir::Value step = 1820 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1821 // extent = ub-lb+step/step 1822 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1823 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1824 sliceUb, step, zero, idxTy); 1825 slicedExtents.emplace_back(extent); 1826 // stride = step*input_stride 1827 mlir::Value stride = 1828 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1829 slicedStrides.emplace_back(stride); 1830 } 1831 } 1832 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1833 slicedExtents, slicedStrides, rewriter); 1834 } 1835 1836 /// Apply a new shape to the data described by a box given the base address, 1837 /// extents and strides of the box. 1838 mlir::LogicalResult 1839 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1840 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1841 mlir::ValueRange operands, 1842 mlir::ConversionPatternRewriter &rewriter) const { 1843 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1844 operands.begin() + rebox.shiftOffset() + 1845 rebox.shift().size()}; 1846 if (rebox.shape().empty()) { 1847 // Only setting new lower bounds. 1848 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1849 inputStrides, rewriter); 1850 } 1851 1852 mlir::Location loc = rebox.getLoc(); 1853 // Strides from the fir.box are in bytes. 1854 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1855 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1856 1857 llvm::SmallVector<mlir::Value> newStrides; 1858 llvm::SmallVector<mlir::Value> newExtents; 1859 mlir::Type idxTy = lowerTy().indexType(); 1860 // First stride from input box is kept. The rest is assumed contiguous 1861 // (it is not possible to reshape otherwise). If the input is scalar, 1862 // which may be OK if all new extents are ones, the stride does not 1863 // matter, use one. 1864 mlir::Value stride = inputStrides.empty() 1865 ? genConstantIndex(loc, idxTy, rewriter, 1) 1866 : inputStrides[0]; 1867 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1868 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1869 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1870 newExtents.emplace_back(extent); 1871 newStrides.emplace_back(stride); 1872 // nextStride = extent * stride; 1873 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1874 } 1875 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1876 rewriter); 1877 } 1878 1879 /// Return scalar element type of the input box. 1880 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1881 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1882 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1883 return seqTy.getEleTy(); 1884 return ty; 1885 } 1886 }; 1887 1888 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1889 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1890 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1891 using FIROpConversion::FIROpConversion; 1892 1893 mlir::LogicalResult 1894 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1895 mlir::ConversionPatternRewriter &rewriter) const override { 1896 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1897 return mlir::failure(); 1898 } 1899 }; 1900 1901 // Code shared between insert_value and extract_value Ops. 1902 struct ValueOpCommon { 1903 // Translate the arguments pertaining to any multidimensional array to 1904 // row-major order for LLVM-IR. 1905 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1906 mlir::Type ty) { 1907 assert(ty && "type is null"); 1908 const auto end = attrs.size(); 1909 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1910 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1911 const auto dim = getDimension(seq); 1912 if (dim > 1) { 1913 auto ub = std::min(i + dim, end); 1914 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1915 i += dim - 1; 1916 } 1917 ty = getArrayElementType(seq); 1918 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1919 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1920 } else { 1921 llvm_unreachable("index into invalid type"); 1922 } 1923 } 1924 } 1925 1926 static llvm::SmallVector<mlir::Attribute> 1927 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1928 mlir::ArrayAttr arrAttr) { 1929 llvm::SmallVector<mlir::Attribute> attrs; 1930 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1931 if (i->isa<mlir::IntegerAttr>()) { 1932 attrs.push_back(*i); 1933 } else { 1934 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1935 ++i; 1936 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1937 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1938 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1939 } 1940 } 1941 return attrs; 1942 } 1943 1944 private: 1945 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1946 auto eleTy = ty.getElementType(); 1947 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1948 eleTy = arrTy.getElementType(); 1949 return eleTy; 1950 } 1951 }; 1952 1953 namespace { 1954 /// Extract a subobject value from an ssa-value of aggregate type 1955 struct ExtractValueOpConversion 1956 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1957 public ValueOpCommon { 1958 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1959 1960 mlir::LogicalResult 1961 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1962 mlir::ConversionPatternRewriter &rewriter) const override { 1963 mlir::ValueRange operands = adaptor.getOperands(); 1964 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1965 toRowMajor(attrs, operands[0].getType()); 1966 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1967 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1968 extractVal, ty, operands[0], position); 1969 return mlir::success(); 1970 } 1971 }; 1972 1973 /// InsertValue is the generalized instruction for the composition of new 1974 /// aggregate type values. 1975 struct InsertValueOpConversion 1976 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1977 public ValueOpCommon { 1978 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1979 1980 mlir::LogicalResult 1981 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1982 mlir::ConversionPatternRewriter &rewriter) const override { 1983 mlir::ValueRange operands = adaptor.getOperands(); 1984 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1985 toRowMajor(attrs, operands[0].getType()); 1986 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1987 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1988 insertVal, ty, operands[0], operands[1], position); 1989 return mlir::success(); 1990 } 1991 }; 1992 1993 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1994 struct InsertOnRangeOpConversion 1995 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1996 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1997 1998 // Increments an array of subscripts in a row major fasion. 1999 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 2000 llvm::SmallVector<uint64_t> &subscripts) const { 2001 for (size_t i = dims.size(); i > 0; --i) { 2002 if (++subscripts[i - 1] < dims[i - 1]) { 2003 return; 2004 } 2005 subscripts[i - 1] = 0; 2006 } 2007 } 2008 2009 mlir::LogicalResult 2010 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2011 mlir::ConversionPatternRewriter &rewriter) const override { 2012 2013 llvm::SmallVector<uint64_t> dims; 2014 auto type = adaptor.getOperands()[0].getType(); 2015 2016 // Iteratively extract the array dimensions from the type. 2017 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2018 dims.push_back(t.getNumElements()); 2019 type = t.getElementType(); 2020 } 2021 2022 llvm::SmallVector<std::uint64_t> lBounds; 2023 llvm::SmallVector<std::uint64_t> uBounds; 2024 2025 // Unzip the upper and lower bound and convert to a row major format. 2026 mlir::DenseIntElementsAttr coor = range.getCoor(); 2027 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2028 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2029 uBounds.push_back(*i++); 2030 lBounds.push_back(*i); 2031 } 2032 2033 auto &subscripts = lBounds; 2034 auto loc = range.getLoc(); 2035 mlir::Value lastOp = adaptor.getOperands()[0]; 2036 mlir::Value insertVal = adaptor.getOperands()[1]; 2037 2038 auto i64Ty = rewriter.getI64Type(); 2039 while (subscripts != uBounds) { 2040 // Convert uint64_t's to Attribute's. 2041 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2042 for (const auto &subscript : subscripts) 2043 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2044 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2045 loc, ty, lastOp, insertVal, 2046 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2047 2048 incrementSubscripts(dims, subscripts); 2049 } 2050 2051 // Convert uint64_t's to Attribute's. 2052 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2053 for (const auto &subscript : subscripts) 2054 subscriptAttrs.push_back( 2055 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2056 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2057 2058 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2059 range, ty, lastOp, insertVal, 2060 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2061 2062 return mlir::success(); 2063 } 2064 }; 2065 } // namespace 2066 2067 namespace { 2068 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2069 /// shifted etc. array. 2070 /// (See the static restriction on coordinate_of.) array_coor determines the 2071 /// coordinate (location) of a specific element. 2072 struct XArrayCoorOpConversion 2073 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2074 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2075 2076 mlir::LogicalResult 2077 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2078 mlir::ConversionPatternRewriter &rewriter) const override { 2079 auto loc = coor.getLoc(); 2080 mlir::ValueRange operands = adaptor.getOperands(); 2081 unsigned rank = coor.getRank(); 2082 assert(coor.indices().size() == rank); 2083 assert(coor.shape().empty() || coor.shape().size() == rank); 2084 assert(coor.shift().empty() || coor.shift().size() == rank); 2085 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2086 mlir::Type idxTy = lowerTy().indexType(); 2087 unsigned indexOffset = coor.indicesOffset(); 2088 unsigned shapeOffset = coor.shapeOffset(); 2089 unsigned shiftOffset = coor.shiftOffset(); 2090 unsigned sliceOffset = coor.sliceOffset(); 2091 auto sliceOps = coor.slice().begin(); 2092 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2093 mlir::Value prevExt = one; 2094 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2095 mlir::Value offset = zero; 2096 const bool isShifted = !coor.shift().empty(); 2097 const bool isSliced = !coor.slice().empty(); 2098 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2099 2100 // For each dimension of the array, generate the offset calculation. 2101 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset, 2102 ++shiftOffset, sliceOffset += 3, sliceOps += 3) { 2103 mlir::Value index = 2104 integerCast(loc, rewriter, idxTy, operands[indexOffset]); 2105 mlir::Value lb = 2106 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset]) 2107 : one; 2108 mlir::Value step = one; 2109 bool normalSlice = isSliced; 2110 // Compute zero based index in dimension i of the element, applying 2111 // potential triplets and lower bounds. 2112 if (isSliced) { 2113 mlir::Value originalUb = *(sliceOps + 1); 2114 normalSlice = 2115 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp()); 2116 if (normalSlice) 2117 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]); 2118 } 2119 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2120 mlir::Value diff = 2121 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2122 if (normalSlice) { 2123 mlir::Value sliceLb = 2124 integerCast(loc, rewriter, idxTy, operands[sliceOffset]); 2125 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2126 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2127 } 2128 // Update the offset given the stride and the zero based index `diff` 2129 // that was just computed. 2130 if (baseIsBoxed) { 2131 // Use stride in bytes from the descriptor. 2132 mlir::Value stride = loadStrideFromBox(loc, operands[0], i, rewriter); 2133 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2134 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2135 } else { 2136 // Use stride computed at last iteration. 2137 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2138 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2139 // Compute next stride assuming contiguity of the base array 2140 // (in element number). 2141 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]); 2142 prevExt = 2143 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2144 } 2145 } 2146 2147 // Add computed offset to the base address. 2148 if (baseIsBoxed) { 2149 // Working with byte offsets. The base address is read from the fir.box. 2150 // and need to be casted to i8* to do the pointer arithmetic. 2151 mlir::Type baseTy = getBaseAddrTypeFromBox(operands[0].getType()); 2152 mlir::Value base = 2153 loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter); 2154 mlir::Type voidPtrTy = getVoidPtrType(); 2155 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2156 llvm::SmallVector<mlir::Value> args{offset}; 2157 auto addr = 2158 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2159 if (coor.subcomponent().empty()) { 2160 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, addr); 2161 return mlir::success(); 2162 } 2163 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2164 args.clear(); 2165 args.push_back(zero); 2166 if (!coor.lenParams().empty()) { 2167 // If type parameters are present, then we don't want to use a GEPOp 2168 // as below, as the LLVM struct type cannot be statically defined. 2169 TODO(loc, "derived type with type parameters"); 2170 } 2171 // TODO: array offset subcomponents must be converted to LLVM's 2172 // row-major layout here. 2173 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2174 args.push_back(operands[i]); 2175 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, casted, args); 2176 return mlir::success(); 2177 } 2178 2179 // The array was not boxed, so it must be contiguous. offset is therefore an 2180 // element offset and the base type is kept in the GEP unless the element 2181 // type size is itself dynamic. 2182 mlir::Value base; 2183 if (coor.subcomponent().empty()) { 2184 // No subcomponent. 2185 if (!coor.lenParams().empty()) { 2186 // Type parameters. Adjust element size explicitly. 2187 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2188 assert(eleTy && "result must be a reference-like type"); 2189 if (fir::characterWithDynamicLen(eleTy)) { 2190 assert(coor.lenParams().size() == 1); 2191 auto length = integerCast(loc, rewriter, idxTy, 2192 operands[coor.lenParamsOffset()]); 2193 offset = 2194 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, length); 2195 } else { 2196 TODO(loc, "compute size of derived type with type parameters"); 2197 } 2198 } 2199 // Cast the base address to a pointer to T. 2200 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, operands[0]); 2201 } else { 2202 // Operand #0 must have a pointer type. For subcomponent slicing, we 2203 // want to cast away the array type and have a plain struct type. 2204 mlir::Type ty0 = operands[0].getType(); 2205 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2206 assert(ptrTy && "expected pointer type"); 2207 mlir::Type eleTy = ptrTy.getElementType(); 2208 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2209 eleTy = arrTy.getElementType(); 2210 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2211 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, operands[0]); 2212 } 2213 llvm::SmallVector<mlir::Value> args = {offset}; 2214 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2215 args.push_back(operands[i]); 2216 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2217 return mlir::success(); 2218 } 2219 }; 2220 } // namespace 2221 2222 /// Convert to (memory) reference to a reference to a subobject. 2223 /// The coordinate_of op is a Swiss army knife operation that can be used on 2224 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2225 /// With unboxed arrays, there is the restriction that the array have a static 2226 /// shape in all but the last column. 2227 struct CoordinateOpConversion 2228 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2229 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2230 2231 mlir::LogicalResult 2232 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2233 mlir::ConversionPatternRewriter &rewriter) const override { 2234 mlir::ValueRange operands = adaptor.getOperands(); 2235 2236 mlir::Location loc = coor.getLoc(); 2237 mlir::Value base = operands[0]; 2238 mlir::Type baseObjectTy = coor.getBaseType(); 2239 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2240 assert(objectTy && "fir.coordinate_of expects a reference type"); 2241 2242 // Complex type - basically, extract the real or imaginary part 2243 if (fir::isa_complex(objectTy)) { 2244 mlir::LLVM::ConstantOp c0 = 2245 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2246 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2247 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2248 rewriter.replaceOp(coor, gep); 2249 return mlir::success(); 2250 } 2251 2252 // Boxed type - get the base pointer from the box 2253 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2254 return doRewriteBox(coor, ty, operands, loc, rewriter); 2255 2256 // Reference, pointer or a heap type 2257 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2258 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2259 2260 return rewriter.notifyMatchFailure( 2261 coor, "fir.coordinate_of base operand has unsupported type"); 2262 } 2263 2264 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2265 return fir::hasDynamicSize(ty) 2266 ? op.getDefiningOp() 2267 ->getAttrOfType<mlir::IntegerAttr>("field") 2268 .getInt() 2269 : getIntValue(op); 2270 } 2271 2272 static int64_t getIntValue(mlir::Value val) { 2273 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2274 mlir::Operation *defop = val.getDefiningOp(); 2275 2276 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2277 return constOp.value(); 2278 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2279 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2280 return attr.getValue().getSExtValue(); 2281 fir::emitFatalError(val.getLoc(), "must be a constant"); 2282 } 2283 2284 static bool hasSubDimensions(mlir::Type type) { 2285 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2286 } 2287 2288 /// Check whether this form of `!fir.coordinate_of` is supported. These 2289 /// additional checks are required, because we are not yet able to convert 2290 /// all valid forms of `!fir.coordinate_of`. 2291 /// TODO: Either implement the unsupported cases or extend the verifier 2292 /// in FIROps.cpp instead. 2293 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2294 const std::size_t numOfCoors = coors.size(); 2295 std::size_t i = 0; 2296 bool subEle = false; 2297 bool ptrEle = false; 2298 for (; i < numOfCoors; ++i) { 2299 mlir::Value nxtOpnd = coors[i]; 2300 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2301 subEle = true; 2302 i += arrTy.getDimension() - 1; 2303 type = arrTy.getEleTy(); 2304 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2305 subEle = true; 2306 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2307 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2308 subEle = true; 2309 type = tupTy.getType(getIntValue(nxtOpnd)); 2310 } else { 2311 ptrEle = true; 2312 } 2313 } 2314 if (ptrEle) 2315 return (!subEle) && (numOfCoors == 1); 2316 return subEle && (i >= numOfCoors); 2317 } 2318 2319 /// Walk the abstract memory layout and determine if the path traverses any 2320 /// array types with unknown shape. Return true iff all the array types have a 2321 /// constant shape along the path. 2322 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2323 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2324 mlir::Value nxtOpnd = coors[i]; 2325 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2326 if (fir::sequenceWithNonConstantShape(arrTy)) 2327 return false; 2328 i += arrTy.getDimension() - 1; 2329 type = arrTy.getEleTy(); 2330 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2331 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2332 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2333 type = strTy.getType(getIntValue(nxtOpnd)); 2334 } else { 2335 return true; 2336 } 2337 } 2338 return true; 2339 } 2340 2341 private: 2342 mlir::LogicalResult 2343 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2344 mlir::Location loc, 2345 mlir::ConversionPatternRewriter &rewriter) const { 2346 mlir::Type boxObjTy = coor.getBaseType(); 2347 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2348 2349 mlir::Value boxBaseAddr = operands[0]; 2350 2351 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2352 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>> 2353 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2354 // %addr = coordinate_of %box, %lenp 2355 if (coor.getNumOperands() == 2) { 2356 mlir::Operation *coordinateDef = 2357 (*coor.getCoor().begin()).getDefiningOp(); 2358 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2359 TODO(loc, 2360 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2361 } 2362 2363 // 2. GENERAL CASE: 2364 // 2.1. (`fir.array`) 2365 // %box = ... : !fix.box<!fir.array<?xU>> 2366 // %idx = ... : index 2367 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2368 // 2.2 (`fir.derived`) 2369 // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>> 2370 // %idx = ... : i32 2371 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2372 // 2.3 (`fir.derived` inside `fir.array`) 2373 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2374 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2375 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2376 // 2.4. TODO: Either document or disable any other case that the following 2377 // implementation might convert. 2378 mlir::LLVM::ConstantOp c0 = 2379 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2380 mlir::Value resultAddr = 2381 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2382 boxBaseAddr, rewriter); 2383 // Component Type 2384 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2385 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2386 2387 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2388 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2389 if (i != 1) 2390 TODO(loc, "fir.array nested inside other array and/or derived type"); 2391 // Applies byte strides from the box. Ignore lower bound from box 2392 // since fir.coordinate_of indexes are zero based. Lowering takes care 2393 // of lower bound aspects. This both accounts for dynamically sized 2394 // types and non contiguous arrays. 2395 auto idxTy = lowerTy().indexType(); 2396 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2397 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2398 index < lastIndex; ++index) { 2399 mlir::Value stride = 2400 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2401 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2402 operands[index], stride); 2403 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2404 } 2405 auto voidPtrBase = 2406 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2407 llvm::SmallVector<mlir::Value> args = {off}; 2408 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2409 voidPtrBase, args); 2410 i += arrTy.getDimension() - 1; 2411 cpnTy = arrTy.getEleTy(); 2412 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2413 auto recRefTy = 2414 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2415 mlir::Value nxtOpnd = operands[i]; 2416 auto memObj = 2417 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2418 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2419 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2420 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2421 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2422 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2423 args); 2424 resultAddr = 2425 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2426 } else { 2427 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2428 } 2429 } 2430 2431 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2432 return mlir::success(); 2433 } 2434 2435 mlir::LogicalResult 2436 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2437 mlir::ValueRange operands, mlir::Location loc, 2438 mlir::ConversionPatternRewriter &rewriter) const { 2439 mlir::Type baseObjectTy = coor.getBaseType(); 2440 2441 // Component Type 2442 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2443 bool hasSubdimension = hasSubDimensions(cpnTy); 2444 bool columnIsDeferred = !hasSubdimension; 2445 2446 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2447 TODO(loc, "unsupported combination of coordinate operands"); 2448 2449 const bool hasKnownShape = 2450 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2451 2452 // If only the column is `?`, then we can simply place the column value in 2453 // the 0-th GEP position. 2454 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2455 if (!hasKnownShape) { 2456 const unsigned sz = arrTy.getDimension(); 2457 if (arraysHaveKnownShape(arrTy.getEleTy(), 2458 operands.drop_front(1 + sz))) { 2459 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2460 bool allConst = true; 2461 for (unsigned i = 0; i < sz - 1; ++i) { 2462 if (shape[i] < 0) { 2463 allConst = false; 2464 break; 2465 } 2466 } 2467 if (allConst) 2468 columnIsDeferred = true; 2469 } 2470 } 2471 } 2472 2473 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2474 return mlir::emitError( 2475 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2476 2477 if (hasKnownShape || columnIsDeferred) { 2478 llvm::SmallVector<mlir::Value> offs; 2479 if (hasKnownShape && hasSubdimension) { 2480 mlir::LLVM::ConstantOp c0 = 2481 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2482 offs.push_back(c0); 2483 } 2484 llvm::Optional<int> dims; 2485 llvm::SmallVector<mlir::Value> arrIdx; 2486 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2487 mlir::Value nxtOpnd = operands[i]; 2488 2489 if (!cpnTy) 2490 return mlir::emitError(loc, "invalid coordinate/check failed"); 2491 2492 // check if the i-th coordinate relates to an array 2493 if (dims) { 2494 arrIdx.push_back(nxtOpnd); 2495 int dimsLeft = *dims; 2496 if (dimsLeft > 1) { 2497 dims = dimsLeft - 1; 2498 continue; 2499 } 2500 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2501 // append array range in reverse (FIR arrays are column-major) 2502 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2503 arrIdx.clear(); 2504 dims.reset(); 2505 continue; 2506 } 2507 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2508 int d = arrTy.getDimension() - 1; 2509 if (d > 0) { 2510 dims = d; 2511 arrIdx.push_back(nxtOpnd); 2512 continue; 2513 } 2514 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2515 offs.push_back(nxtOpnd); 2516 continue; 2517 } 2518 2519 // check if the i-th coordinate relates to a field 2520 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2521 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2522 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2523 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2524 else 2525 cpnTy = nullptr; 2526 2527 offs.push_back(nxtOpnd); 2528 } 2529 if (dims) 2530 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2531 mlir::Value base = operands[0]; 2532 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2533 rewriter.replaceOp(coor, retval); 2534 return mlir::success(); 2535 } 2536 2537 return mlir::emitError( 2538 loc, "fir.coordinate_of base operand has unsupported type"); 2539 } 2540 }; 2541 2542 /// Convert `fir.field_index`. The conversion depends on whether the size of 2543 /// the record is static or dynamic. 2544 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2545 using FIROpConversion::FIROpConversion; 2546 2547 // NB: most field references should be resolved by this point 2548 mlir::LogicalResult 2549 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2550 mlir::ConversionPatternRewriter &rewriter) const override { 2551 auto recTy = field.getOnType().cast<fir::RecordType>(); 2552 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2553 2554 if (!fir::hasDynamicSize(recTy)) { 2555 // Derived type has compile-time constant layout. Return index of the 2556 // component type in the parent type (to be used in GEP). 2557 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset( 2558 field.getLoc(), rewriter, index)}); 2559 return mlir::success(); 2560 } 2561 2562 // Derived type has compile-time constant layout. Call the compiler 2563 // generated function to determine the byte offset of the field at runtime. 2564 // This returns a non-constant. 2565 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get( 2566 field.getContext(), getOffsetMethodName(recTy, field.getFieldId())); 2567 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr); 2568 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr( 2569 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index)); 2570 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 2571 field, lowerTy().offsetType(), adaptor.getOperands(), 2572 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr}); 2573 return mlir::success(); 2574 } 2575 2576 // Re-Construct the name of the compiler generated method that calculates the 2577 // offset 2578 inline static std::string getOffsetMethodName(fir::RecordType recTy, 2579 llvm::StringRef field) { 2580 return recTy.getName().str() + "P." + field.str() + ".offset"; 2581 } 2582 }; 2583 2584 /// Convert `fir.end` 2585 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 2586 using FIROpConversion::FIROpConversion; 2587 2588 mlir::LogicalResult 2589 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 2590 mlir::ConversionPatternRewriter &rewriter) const override { 2591 TODO(firEnd.getLoc(), "fir.end codegen"); 2592 return mlir::failure(); 2593 } 2594 }; 2595 2596 /// Lower `fir.gentypedesc` to a global constant. 2597 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2598 using FIROpConversion::FIROpConversion; 2599 2600 mlir::LogicalResult 2601 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2602 mlir::ConversionPatternRewriter &rewriter) const override { 2603 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2604 return mlir::failure(); 2605 } 2606 }; 2607 2608 /// Lower `fir.has_value` operation to `llvm.return` operation. 2609 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2610 using FIROpConversion::FIROpConversion; 2611 2612 mlir::LogicalResult 2613 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2614 mlir::ConversionPatternRewriter &rewriter) const override { 2615 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2616 adaptor.getOperands()); 2617 return mlir::success(); 2618 } 2619 }; 2620 2621 /// Lower `fir.global` operation to `llvm.global` operation. 2622 /// `fir.insert_on_range` operations are replaced with constant dense attribute 2623 /// if they are applied on the full range. 2624 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2625 using FIROpConversion::FIROpConversion; 2626 2627 mlir::LogicalResult 2628 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2629 mlir::ConversionPatternRewriter &rewriter) const override { 2630 auto tyAttr = convertType(global.getType()); 2631 if (global.getType().isa<fir::BoxType>()) 2632 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2633 auto loc = global.getLoc(); 2634 mlir::Attribute initAttr; 2635 if (global.getInitVal()) 2636 initAttr = global.getInitVal().getValue(); 2637 auto linkage = convertLinkage(global.getLinkName()); 2638 auto isConst = global.getConstant().has_value(); 2639 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2640 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2641 auto &gr = g.getInitializerRegion(); 2642 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2643 if (!gr.empty()) { 2644 // Replace insert_on_range with a constant dense attribute if the 2645 // initialization is on the full range. 2646 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2647 for (auto insertOp : insertOnRangeOps) { 2648 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2649 auto seqTyAttr = convertType(insertOp.getType()); 2650 auto *op = insertOp.getVal().getDefiningOp(); 2651 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2652 if (!constant) { 2653 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2654 if (!convertOp) 2655 continue; 2656 constant = mlir::cast<mlir::arith::ConstantOp>( 2657 convertOp.getValue().getDefiningOp()); 2658 } 2659 mlir::Type vecType = mlir::VectorType::get( 2660 insertOp.getType().getShape(), constant.getType()); 2661 auto denseAttr = mlir::DenseElementsAttr::get( 2662 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2663 rewriter.setInsertionPointAfter(insertOp); 2664 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2665 insertOp, seqTyAttr, denseAttr); 2666 } 2667 } 2668 } 2669 rewriter.eraseOp(global); 2670 return mlir::success(); 2671 } 2672 2673 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2674 fir::SequenceType seqTy) const { 2675 auto extents = seqTy.getShape(); 2676 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2677 return false; 2678 auto cur_index = indexes.value_begin<int64_t>(); 2679 for (unsigned i = 0; i < indexes.size(); i += 2) { 2680 if (*(cur_index++) != 0) 2681 return false; 2682 if (*(cur_index++) != extents[i / 2] - 1) 2683 return false; 2684 } 2685 return true; 2686 } 2687 2688 // TODO: String comparaison should be avoided. Replace linkName with an 2689 // enumeration. 2690 mlir::LLVM::Linkage 2691 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2692 if (optLinkage) { 2693 auto name = optLinkage.getValue(); 2694 if (name == "internal") 2695 return mlir::LLVM::Linkage::Internal; 2696 if (name == "linkonce") 2697 return mlir::LLVM::Linkage::Linkonce; 2698 if (name == "linkonce_odr") 2699 return mlir::LLVM::Linkage::LinkonceODR; 2700 if (name == "common") 2701 return mlir::LLVM::Linkage::Common; 2702 if (name == "weak") 2703 return mlir::LLVM::Linkage::Weak; 2704 } 2705 return mlir::LLVM::Linkage::External; 2706 } 2707 }; 2708 2709 /// `fir.load` --> `llvm.load` 2710 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2711 using FIROpConversion::FIROpConversion; 2712 2713 mlir::LogicalResult 2714 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2715 mlir::ConversionPatternRewriter &rewriter) const override { 2716 // fir.box is a special case because it is considered as an ssa values in 2717 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2718 // and fir.box end up being the same llvm types and loading a 2719 // fir.ref<fir.box> is actually a no op in LLVM. 2720 if (load.getType().isa<fir::BoxType>()) { 2721 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2722 } else { 2723 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2724 load, convertType(load.getType()), adaptor.getOperands(), 2725 load->getAttrs()); 2726 } 2727 return mlir::success(); 2728 } 2729 }; 2730 2731 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2732 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2733 /// math flags? 2734 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2735 using FIROpConversion::FIROpConversion; 2736 2737 mlir::LogicalResult 2738 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2739 mlir::ConversionPatternRewriter &rewriter) const override { 2740 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2741 return mlir::success(); 2742 } 2743 }; 2744 2745 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2746 llvm::Optional<mlir::ValueRange> destOps, 2747 mlir::ConversionPatternRewriter &rewriter, 2748 mlir::Block *newBlock) { 2749 if (destOps) 2750 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2751 newBlock, mlir::ValueRange()); 2752 else 2753 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2754 } 2755 2756 template <typename A, typename B> 2757 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2758 mlir::ConversionPatternRewriter &rewriter) { 2759 if (destOps) 2760 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2761 dest); 2762 else 2763 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2764 } 2765 2766 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2767 mlir::Block *dest, 2768 llvm::Optional<mlir::ValueRange> destOps, 2769 mlir::ConversionPatternRewriter &rewriter) { 2770 auto *thisBlock = rewriter.getInsertionBlock(); 2771 auto *newBlock = createBlock(rewriter, dest); 2772 rewriter.setInsertionPointToEnd(thisBlock); 2773 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2774 rewriter.setInsertionPointToEnd(newBlock); 2775 } 2776 2777 /// Conversion of `fir.select_case` 2778 /// 2779 /// The `fir.select_case` operation is converted to a if-then-else ladder. 2780 /// Depending on the case condition type, one or several comparison and 2781 /// conditional branching can be generated. 2782 /// 2783 /// A a point value case such as `case(4)`, a lower bound case such as 2784 /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a 2785 /// simple comparison between the selector value and the constant value in the 2786 /// case. The block associated with the case condition is then executed if 2787 /// the comparison succeed otherwise it branch to the next block with the 2788 /// comparison for the the next case conditon. 2789 /// 2790 /// A closed interval case condition such as `case(7:10)` is converted with a 2791 /// first comparison and conditional branching for the lower bound. If 2792 /// successful, it branch to a second block with the comparison for the 2793 /// upper bound in the same case condition. 2794 /// 2795 /// TODO: lowering of CHARACTER type cases is not handled yet. 2796 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 2797 using FIROpConversion::FIROpConversion; 2798 2799 mlir::LogicalResult 2800 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 2801 mlir::ConversionPatternRewriter &rewriter) const override { 2802 unsigned conds = caseOp.getNumConditions(); 2803 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 2804 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 2805 auto ty = caseOp.getSelector().getType(); 2806 if (ty.isa<fir::CharacterType>()) { 2807 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 2808 return mlir::failure(); 2809 } 2810 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 2811 auto loc = caseOp.getLoc(); 2812 for (unsigned t = 0; t != conds; ++t) { 2813 mlir::Block *dest = caseOp.getSuccessor(t); 2814 llvm::Optional<mlir::ValueRange> destOps = 2815 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 2816 llvm::Optional<mlir::ValueRange> cmpOps = 2817 *caseOp.getCompareOperands(adaptor.getOperands(), t); 2818 mlir::Value caseArg = *(cmpOps.value().begin()); 2819 mlir::Attribute attr = cases[t]; 2820 if (attr.isa<fir::PointIntervalAttr>()) { 2821 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2822 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 2823 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2824 continue; 2825 } 2826 if (attr.isa<fir::LowerBoundAttr>()) { 2827 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2828 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2829 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2830 continue; 2831 } 2832 if (attr.isa<fir::UpperBoundAttr>()) { 2833 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2834 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 2835 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2836 continue; 2837 } 2838 if (attr.isa<fir::ClosedIntervalAttr>()) { 2839 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2840 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2841 auto *thisBlock = rewriter.getInsertionBlock(); 2842 auto *newBlock1 = createBlock(rewriter, dest); 2843 auto *newBlock2 = createBlock(rewriter, dest); 2844 rewriter.setInsertionPointToEnd(thisBlock); 2845 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 2846 rewriter.setInsertionPointToEnd(newBlock1); 2847 mlir::Value caseArg0 = *(cmpOps.value().begin() + 1); 2848 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 2849 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 2850 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); 2851 rewriter.setInsertionPointToEnd(newBlock2); 2852 continue; 2853 } 2854 assert(attr.isa<mlir::UnitAttr>()); 2855 assert((t + 1 == conds) && "unit must be last"); 2856 genBrOp(caseOp, dest, destOps, rewriter); 2857 } 2858 return mlir::success(); 2859 } 2860 }; 2861 2862 template <typename OP> 2863 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 2864 typename OP::Adaptor adaptor, 2865 mlir::ConversionPatternRewriter &rewriter) { 2866 unsigned conds = select.getNumConditions(); 2867 auto cases = select.getCases().getValue(); 2868 mlir::Value selector = adaptor.getSelector(); 2869 auto loc = select.getLoc(); 2870 assert(conds > 0 && "select must have cases"); 2871 2872 llvm::SmallVector<mlir::Block *> destinations; 2873 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 2874 mlir::Block *defaultDestination; 2875 mlir::ValueRange defaultOperands; 2876 llvm::SmallVector<int32_t> caseValues; 2877 2878 for (unsigned t = 0; t != conds; ++t) { 2879 mlir::Block *dest = select.getSuccessor(t); 2880 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 2881 const mlir::Attribute &attr = cases[t]; 2882 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 2883 destinations.push_back(dest); 2884 destinationsOperands.push_back(destOps ? *destOps : mlir::ValueRange{}); 2885 caseValues.push_back(intAttr.getInt()); 2886 continue; 2887 } 2888 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 2889 assert((t + 1 == conds) && "unit must be last"); 2890 defaultDestination = dest; 2891 defaultOperands = destOps ? *destOps : mlir::ValueRange{}; 2892 } 2893 2894 // LLVM::SwitchOp takes a i32 type for the selector. 2895 if (select.getSelector().getType() != rewriter.getI32Type()) 2896 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(), 2897 selector); 2898 2899 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 2900 select, selector, 2901 /*defaultDestination=*/defaultDestination, 2902 /*defaultOperands=*/defaultOperands, 2903 /*caseValues=*/caseValues, 2904 /*caseDestinations=*/destinations, 2905 /*caseOperands=*/destinationsOperands, 2906 /*branchWeights=*/llvm::ArrayRef<std::int32_t>()); 2907 } 2908 2909 /// conversion of fir::SelectOp to an if-then-else ladder 2910 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 2911 using FIROpConversion::FIROpConversion; 2912 2913 mlir::LogicalResult 2914 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 2915 mlir::ConversionPatternRewriter &rewriter) const override { 2916 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 2917 return mlir::success(); 2918 } 2919 }; 2920 2921 /// conversion of fir::SelectRankOp to an if-then-else ladder 2922 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 2923 using FIROpConversion::FIROpConversion; 2924 2925 mlir::LogicalResult 2926 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 2927 mlir::ConversionPatternRewriter &rewriter) const override { 2928 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 2929 return mlir::success(); 2930 } 2931 }; 2932 2933 /// Lower `fir.select_type` to LLVM IR dialect. 2934 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2935 using FIROpConversion::FIROpConversion; 2936 2937 mlir::LogicalResult 2938 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2939 mlir::ConversionPatternRewriter &rewriter) const override { 2940 mlir::emitError(select.getLoc(), 2941 "fir.select_type should have already been converted"); 2942 return mlir::failure(); 2943 } 2944 }; 2945 2946 /// `fir.store` --> `llvm.store` 2947 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2948 using FIROpConversion::FIROpConversion; 2949 2950 mlir::LogicalResult 2951 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2952 mlir::ConversionPatternRewriter &rewriter) const override { 2953 if (store.getValue().getType().isa<fir::BoxType>()) { 2954 // fir.box value is actually in memory, load it first before storing it. 2955 mlir::Location loc = store.getLoc(); 2956 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2957 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2958 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2959 adaptor.getOperands()[0]); 2960 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2961 store, val, adaptor.getOperands()[1]); 2962 } else { 2963 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2964 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2965 } 2966 return mlir::success(); 2967 } 2968 }; 2969 2970 namespace { 2971 2972 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2973 /// the character buffer and one for the buffer length. 2974 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2975 using FIROpConversion::FIROpConversion; 2976 2977 mlir::LogicalResult 2978 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2979 mlir::ConversionPatternRewriter &rewriter) const override { 2980 auto *ctx = unboxchar.getContext(); 2981 2982 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2983 mlir::Value tuple = adaptor.getOperands()[0]; 2984 mlir::Type tupleTy = tuple.getType(); 2985 2986 mlir::Location loc = unboxchar.getLoc(); 2987 mlir::Value ptrToBuffer = 2988 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2989 2990 mlir::LLVM::ExtractValueOp len = 2991 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2992 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2993 2994 rewriter.replaceOp(unboxchar, 2995 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2996 return mlir::success(); 2997 } 2998 }; 2999 3000 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 3001 /// components. 3002 /// TODO: Part of supporting Fortran 2003 procedure pointers. 3003 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 3004 using FIROpConversion::FIROpConversion; 3005 3006 mlir::LogicalResult 3007 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 3008 mlir::ConversionPatternRewriter &rewriter) const override { 3009 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 3010 return mlir::failure(); 3011 } 3012 }; 3013 3014 /// convert to LLVM IR dialect `undef` 3015 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3016 using FIROpConversion::FIROpConversion; 3017 3018 mlir::LogicalResult 3019 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3020 mlir::ConversionPatternRewriter &rewriter) const override { 3021 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3022 undef, convertType(undef.getType())); 3023 return mlir::success(); 3024 } 3025 }; 3026 3027 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3028 using FIROpConversion::FIROpConversion; 3029 3030 mlir::LogicalResult 3031 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3032 mlir::ConversionPatternRewriter &rewriter) const override { 3033 mlir::Type ty = convertType(zero.getType()); 3034 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3035 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3036 } else if (ty.isa<mlir::IntegerType>()) { 3037 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3038 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3039 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3040 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3041 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3042 } else { 3043 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3044 return rewriter.notifyMatchFailure( 3045 zero, 3046 "conversion of fir.zero with aggregate type not implemented yet"); 3047 } 3048 return mlir::success(); 3049 } 3050 }; 3051 3052 /// `fir.unreachable` --> `llvm.unreachable` 3053 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3054 using FIROpConversion::FIROpConversion; 3055 3056 mlir::LogicalResult 3057 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3058 mlir::ConversionPatternRewriter &rewriter) const override { 3059 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3060 return mlir::success(); 3061 } 3062 }; 3063 3064 /// `fir.is_present` --> 3065 /// ``` 3066 /// %0 = llvm.mlir.constant(0 : i64) 3067 /// %1 = llvm.ptrtoint %0 3068 /// %2 = llvm.icmp "ne" %1, %0 : i64 3069 /// ``` 3070 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3071 using FIROpConversion::FIROpConversion; 3072 3073 mlir::LogicalResult 3074 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3075 mlir::ConversionPatternRewriter &rewriter) const override { 3076 mlir::Type idxTy = lowerTy().indexType(); 3077 mlir::Location loc = isPresent.getLoc(); 3078 auto ptr = adaptor.getOperands()[0]; 3079 3080 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3081 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3082 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3083 3084 mlir::Type ty = structTy.getBody()[0]; 3085 mlir::MLIRContext *ctx = isPresent.getContext(); 3086 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3087 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3088 } 3089 mlir::LLVM::ConstantOp c0 = 3090 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3091 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3092 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3093 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3094 3095 return mlir::success(); 3096 } 3097 }; 3098 3099 /// Create value signaling an absent optional argument in a call, e.g. 3100 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3101 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3102 using FIROpConversion::FIROpConversion; 3103 3104 mlir::LogicalResult 3105 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3106 mlir::ConversionPatternRewriter &rewriter) const override { 3107 mlir::Type ty = convertType(absent.getType()); 3108 mlir::Location loc = absent.getLoc(); 3109 3110 if (absent.getType().isa<fir::BoxCharType>()) { 3111 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3112 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3113 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3114 auto nullField = 3115 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3116 mlir::MLIRContext *ctx = absent.getContext(); 3117 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3118 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3119 absent, ty, undefStruct, nullField, c0); 3120 } else { 3121 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3122 } 3123 return mlir::success(); 3124 } 3125 }; 3126 3127 // 3128 // Primitive operations on Complex types 3129 // 3130 3131 /// Generate inline code for complex addition/subtraction 3132 template <typename LLVMOP, typename OPTY> 3133 static mlir::LLVM::InsertValueOp 3134 complexSum(OPTY sumop, mlir::ValueRange opnds, 3135 mlir::ConversionPatternRewriter &rewriter, 3136 fir::LLVMTypeConverter &lowering) { 3137 mlir::Value a = opnds[0]; 3138 mlir::Value b = opnds[1]; 3139 auto loc = sumop.getLoc(); 3140 auto ctx = sumop.getContext(); 3141 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3142 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3143 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3144 mlir::Type ty = lowering.convertType(sumop.getType()); 3145 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3146 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3147 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3148 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3149 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3150 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3151 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3152 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3153 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3154 } 3155 } // namespace 3156 3157 namespace { 3158 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3159 using FIROpConversion::FIROpConversion; 3160 3161 mlir::LogicalResult 3162 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3163 mlir::ConversionPatternRewriter &rewriter) const override { 3164 // given: (x + iy) + (x' + iy') 3165 // result: (x + x') + i(y + y') 3166 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3167 rewriter, lowerTy()); 3168 rewriter.replaceOp(addc, r.getResult()); 3169 return mlir::success(); 3170 } 3171 }; 3172 3173 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3174 using FIROpConversion::FIROpConversion; 3175 3176 mlir::LogicalResult 3177 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3178 mlir::ConversionPatternRewriter &rewriter) const override { 3179 // given: (x + iy) - (x' + iy') 3180 // result: (x - x') + i(y - y') 3181 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3182 rewriter, lowerTy()); 3183 rewriter.replaceOp(subc, r.getResult()); 3184 return mlir::success(); 3185 } 3186 }; 3187 3188 /// Inlined complex multiply 3189 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3190 using FIROpConversion::FIROpConversion; 3191 3192 mlir::LogicalResult 3193 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3194 mlir::ConversionPatternRewriter &rewriter) const override { 3195 // TODO: Can we use a call to __muldc3 ? 3196 // given: (x + iy) * (x' + iy') 3197 // result: (xx'-yy')+i(xy'+yx') 3198 mlir::Value a = adaptor.getOperands()[0]; 3199 mlir::Value b = adaptor.getOperands()[1]; 3200 auto loc = mulc.getLoc(); 3201 auto *ctx = mulc.getContext(); 3202 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3203 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3204 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3205 mlir::Type ty = convertType(mulc.getType()); 3206 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3207 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3208 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3209 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3210 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3211 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3212 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3213 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3214 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3215 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3216 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3217 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3218 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3219 rewriter.replaceOp(mulc, r0.getResult()); 3220 return mlir::success(); 3221 } 3222 }; 3223 3224 /// Inlined complex division 3225 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3226 using FIROpConversion::FIROpConversion; 3227 3228 mlir::LogicalResult 3229 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3230 mlir::ConversionPatternRewriter &rewriter) const override { 3231 // TODO: Can we use a call to __divdc3 instead? 3232 // Just generate inline code for now. 3233 // given: (x + iy) / (x' + iy') 3234 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 3235 mlir::Value a = adaptor.getOperands()[0]; 3236 mlir::Value b = adaptor.getOperands()[1]; 3237 auto loc = divc.getLoc(); 3238 auto *ctx = divc.getContext(); 3239 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3240 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3241 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 3242 mlir::Type ty = convertType(divc.getType()); 3243 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3244 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3245 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3246 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3247 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3248 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 3249 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3250 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3251 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3252 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 3253 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 3254 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 3255 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 3256 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 3257 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 3258 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3259 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3260 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3261 rewriter.replaceOp(divc, r0.getResult()); 3262 return mlir::success(); 3263 } 3264 }; 3265 3266 /// Inlined complex negation 3267 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 3268 using FIROpConversion::FIROpConversion; 3269 3270 mlir::LogicalResult 3271 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 3272 mlir::ConversionPatternRewriter &rewriter) const override { 3273 // given: -(x + iy) 3274 // result: -x - iy 3275 auto *ctxt = neg.getContext(); 3276 auto eleTy = convertType(getComplexEleTy(neg.getType())); 3277 auto ty = convertType(neg.getType()); 3278 auto loc = neg.getLoc(); 3279 mlir::Value o0 = adaptor.getOperands()[0]; 3280 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 3281 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 3282 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 3283 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 3284 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 3285 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 3286 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 3287 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 3288 return mlir::success(); 3289 } 3290 }; 3291 3292 /// Conversion pattern for operation that must be dead. The information in these 3293 /// operations is used by other operation. At this point they should not have 3294 /// anymore uses. 3295 /// These operations are normally dead after the pre-codegen pass. 3296 template <typename FromOp> 3297 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3298 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3299 const fir::FIRToLLVMPassOptions &options) 3300 : FIROpConversion<FromOp>(lowering, options) {} 3301 using OpAdaptor = typename FromOp::Adaptor; 3302 3303 mlir::LogicalResult 3304 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3305 mlir::ConversionPatternRewriter &rewriter) const final { 3306 if (!op->getUses().empty()) 3307 return rewriter.notifyMatchFailure(op, "op must be dead"); 3308 rewriter.eraseOp(op); 3309 return mlir::success(); 3310 } 3311 }; 3312 3313 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3314 using MustBeDeadConversion::MustBeDeadConversion; 3315 }; 3316 3317 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3318 using MustBeDeadConversion::MustBeDeadConversion; 3319 }; 3320 3321 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3322 using MustBeDeadConversion::MustBeDeadConversion; 3323 }; 3324 3325 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3326 using MustBeDeadConversion::MustBeDeadConversion; 3327 }; 3328 3329 } // namespace 3330 3331 namespace { 3332 /// Convert FIR dialect to LLVM dialect 3333 /// 3334 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3335 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3336 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3337 public: 3338 FIRToLLVMLowering() = default; 3339 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3340 mlir::ModuleOp getModule() { return getOperation(); } 3341 3342 void runOnOperation() override final { 3343 auto mod = getModule(); 3344 if (!forcedTargetTriple.empty()) 3345 fir::setTargetTriple(mod, forcedTargetTriple); 3346 3347 auto *context = getModule().getContext(); 3348 fir::LLVMTypeConverter typeConverter{getModule()}; 3349 mlir::RewritePatternSet pattern(context); 3350 pattern.insert< 3351 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3352 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3353 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3354 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3355 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3356 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3357 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3358 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3359 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3360 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3361 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3362 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3363 InsertValueOpConversion, IsPresentOpConversion, 3364 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3365 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3366 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3367 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3368 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3369 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3370 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3371 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter, 3372 options); 3373 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern); 3374 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern); 3375 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3376 pattern); 3377 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3378 pattern); 3379 // Convert math-like dialect operations, which can be produced 3380 // when late math lowering mode is used, into llvm dialect. 3381 mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern); 3382 mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0); 3383 mlir::ConversionTarget target{*context}; 3384 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3385 // The OpenMP dialect is legal for Operations without regions, for those 3386 // which contains regions it is legal if the region contains only the 3387 // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and 3388 // legalize conversion of OpenMP operations without regions. 3389 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter); 3390 target.addLegalDialect<mlir::omp::OpenMPDialect>(); 3391 3392 // required NOPs for applying a full conversion 3393 target.addLegalOp<mlir::ModuleOp>(); 3394 3395 // apply the patterns 3396 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3397 std::move(pattern)))) { 3398 signalPassFailure(); 3399 } 3400 } 3401 3402 private: 3403 fir::FIRToLLVMPassOptions options; 3404 }; 3405 3406 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3407 struct LLVMIRLoweringPass 3408 : public mlir::PassWrapper<LLVMIRLoweringPass, 3409 mlir::OperationPass<mlir::ModuleOp>> { 3410 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass) 3411 3412 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p) 3413 : output{output}, printer{p} {} 3414 3415 mlir::ModuleOp getModule() { return getOperation(); } 3416 3417 void runOnOperation() override final { 3418 auto *ctx = getModule().getContext(); 3419 auto optName = getModule().getName(); 3420 llvm::LLVMContext llvmCtx; 3421 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3422 getModule(), llvmCtx, optName ? *optName : "FIRModule")) { 3423 printer(*llvmModule, output); 3424 return; 3425 } 3426 3427 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3428 signalPassFailure(); 3429 } 3430 3431 private: 3432 llvm::raw_ostream &output; 3433 fir::LLVMIRLoweringPrinter printer; 3434 }; 3435 3436 } // namespace 3437 3438 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3439 return std::make_unique<FIRToLLVMLowering>(); 3440 } 3441 3442 std::unique_ptr<mlir::Pass> 3443 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) { 3444 return std::make_unique<FIRToLLVMLowering>(options); 3445 } 3446 3447 std::unique_ptr<mlir::Pass> 3448 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output, 3449 fir::LLVMIRLoweringPrinter printer) { 3450 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3451 } 3452