//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
#include "mlir/Conversion/MathToLibm/MathToLibm.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
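/// CFI_attribute_pointer marks a descriptor created for a POINTER entity and
/// CFI_attribute_allocatable marks one created for an ALLOCATABLE entity.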
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
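  /// The \p boxValue argument is the index of the descriptor field to read
  /// (one of the kXxxPosInBox constants); the field is addressed with a GEP
  /// and then loaded.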
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
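  /// The base address is stored at field kAddrPosInBox of the descriptor.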
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against
  // \p maskValue. The final comparison is implemented as
  // `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv = {args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};
} // namespace

namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
    return memSizeFunc;
  TODO(op.getLoc(), "did not find allocation function");
}

// Compute the alloc scale size (constant factors encoded in the array type).
// We do this for arrays without a constant interior or for arrays of
// character with dynamic length, since those are the only ones that get
// decayed to a pointer to the element type.
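// The result is the product of all compile-time-known extents; a null Value
// is returned when that product is 1 so callers can skip the multiply.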
template <typename OP>
static mlir::Value
genAllocationScaleSize(OP op, mlir::Type ity,
                       mlir::ConversionPatternRewriter &rewriter) {
  mlir::Location loc = op.getLoc();
  mlir::Type dataTy = op.getInType();
  mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
  auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
  if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
      (seqTy && fir::characterWithDynamicLen(scalarType))) {
    fir::SequenceType::Extent constSize = 1;
    for (auto extent : seqTy.getShape())
      if (extent != fir::SequenceType::getUnknownExtent())
        constSize *= extent;
    if (constSize != 1) {
      mlir::Value constVal{
          genConstantIndex(loc, ity, rewriter, constSize).getResult()};
      return constVal;
    }
  }
  return nullptr;
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = ::getVoidPtrType(alloc.getContext());
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    if (alloc.hasShapeOperands()) {
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Result in a triple set of GEPs and loads.
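/// The triple is the (lower bound, extent, stride) entry read from the dims
/// field of the descriptor.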
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine whether the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
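/// A literal held as a StringAttr or DenseElementsAttr becomes a single LLVM
/// constant; an ArrayAttr of characters is built element by element with
/// insertvalue.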
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    llvm::SmallVector<mlir::Value, 2> rp = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    llvm::SmallVector<mlir::Value, 2> ip = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return mlir::success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return mlir::success();
  }

  inline llvm::APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return mlir::failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return mlir::success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
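/// This uses the null-pointer GEP idiom: index one element past a null
/// pointer of the type and convert the resulting address to an integer.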
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args = {one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type heapTy = heap.getType();
    mlir::Type ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    mlir::Type dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return mlir::success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
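/// The declaration `void free(i8 *)` is added to the module if it is not
/// already present.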
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) {
  unsigned result = 1;
  for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>();
       eleTy;
       eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>())
    ++result;
  return result;
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is a LLVMFuncOp.
  // 2. The first ancestor that is a LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
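  // The alloca is inserted at the start of the entry block of the enclosing
  // LLVMFuncOp; the previous insertion point is restored afterwards.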
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return static_cast<bool>(unwrapIfDerived(boxTy));
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8);
      auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created by
  /// lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
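    // For those (and when ignoreMissingTypeDescriptors is set) a null pointer
    // is emitted; for any other missing descriptor this is a fatal error.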
    if (!this->options.ignoreMissingTypeDescriptors &&
        !fir::NameUniquer::belongsToModule(
            name, Fortran::semantics::typeInfoBuiltinModule))
      fir::emitFatalError(
          loc, "runtime derived type info descriptor was not generated");
    return rewriter.create<mlir::LLVM::NullOp>(
        loc, ::getVoidPtrType(box.getContext()));
  }

  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values.
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a scalar
  /// string and the zero based string lower bound.
  mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Location loc, mlir::Value base,
                                 mlir::Value lowerBound) const {
    llvm::SmallVector<mlir::Value> gepOperands;
    auto baseType =
        base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
    if (auto arrayType = baseType.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
      // FIXME: The baseType should be the array element type here, meaning
      // there should be at most one dimension (constant-length characters are
      // lowered to LLVM as arrays of length-one characters). However, using
      // the character type in the GEP does not lead to correct GEPs when llvm
      // opaque pointers are enabled.
1401 auto idxTy = this->lowerTy().indexType(); 1402 gepOperands.append(getDimension(arrayType), 1403 genConstantIndex(loc, idxTy, rewriter, 0)); 1404 gepOperands.push_back(lowerBound); 1405 } else { 1406 gepOperands.push_back(lowerBound); 1407 } 1408 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1409 } 1410 1411 /// If the embox is not in a globalOp body, allocate storage for the box; 1412 /// store the value inside and return the generated alloca. Return the input 1413 /// value otherwise. 1414 mlir::Value 1415 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1416 mlir::Location loc, mlir::Value boxValue) const { 1417 auto *thisBlock = rewriter.getInsertionBlock(); 1418 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1419 return boxValue; 1420 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1421 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1422 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1423 return alloca; 1424 } 1425 }; 1426 1427 /// Compute the extent of a triplet slice (lb:ub:step). 1428 static mlir::Value 1429 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1430 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1431 mlir::Value step, mlir::Value zero, mlir::Type type) { 1432 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1433 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1434 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1435 // If the resulting extent is negative (`ub-lb` and `step` have different 1436 // signs), zero must be returned instead. 1437 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1438 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1439 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1440 } 1441 1442 /// Create a generic box on a memory reference. This conversions lowers the 1443 /// abstract box to the appropriate, initialized descriptor. 1444 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1445 using EmboxCommonConversion::EmboxCommonConversion; 1446 1447 mlir::LogicalResult 1448 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1449 mlir::ConversionPatternRewriter &rewriter) const override { 1450 mlir::ValueRange operands = adaptor.getOperands(); 1451 assert(!embox.getShape() && "There should be no dims on this embox op"); 1452 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1453 embox, rewriter, /*rank=*/0, /*lenParams=*/operands.drop_front(1)); 1454 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, operands[0]); 1455 if (isDerivedTypeWithLenParams(boxTy)) { 1456 TODO(embox.getLoc(), 1457 "fir.embox codegen of derived with length parameters"); 1458 return mlir::failure(); 1459 } 1460 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1461 rewriter.replaceOp(embox, result); 1462 return mlir::success(); 1463 } 1464 }; 1465 1466 /// Create a generic box on a memory reference. 
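/// The shape, shift, slice, subcomponent, and type parameter operands are all
/// explicit on the `fir.cg.xembox` op. Lowering fills the descriptor fields
/// (base address, element size, version, rank, type code, attribute, and the
/// per-dimension lower bound/extent/stride triples) with llvm.insertvalue
/// operations, then stores the result to a stack temporary unless the op
/// appears inside a global initializer.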
1467 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1468 using EmboxCommonConversion::EmboxCommonConversion; 1469 1470 mlir::LogicalResult 1471 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1472 mlir::ConversionPatternRewriter &rewriter) const override { 1473 mlir::ValueRange operands = adaptor.getOperands(); 1474 auto [boxTy, dest, eleSize] = 1475 consDescriptorPrefix(xbox, rewriter, xbox.getOutRank(), 1476 operands.drop_front(xbox.lenParamOffset())); 1477 // Generate the triples in the dims field of the descriptor 1478 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1479 mlir::Value base = operands[0]; 1480 assert(!xbox.shape().empty() && "must have a shape"); 1481 unsigned shapeOffset = xbox.shapeOffset(); 1482 bool hasShift = !xbox.shift().empty(); 1483 unsigned shiftOffset = xbox.shiftOffset(); 1484 bool hasSlice = !xbox.slice().empty(); 1485 unsigned sliceOffset = xbox.sliceOffset(); 1486 mlir::Location loc = xbox.getLoc(); 1487 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1488 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1489 mlir::Value prevPtrOff = one; 1490 mlir::Type eleTy = boxTy.getEleTy(); 1491 const unsigned rank = xbox.getRank(); 1492 llvm::SmallVector<mlir::Value> gepArgs; 1493 unsigned constRows = 0; 1494 mlir::Value ptrOffset = zero; 1495 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1496 assert(memEleTy.isa<fir::SequenceType>()); 1497 auto seqTy = memEleTy.cast<fir::SequenceType>(); 1498 mlir::Type seqEleTy = seqTy.getEleTy(); 1499 // Adjust the element scaling factor if the element is a dependent type. 1500 if (fir::hasDynamicSize(seqEleTy)) { 1501 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) { 1502 assert(xbox.lenParams().size() == 1); 1503 mlir::LLVM::ConstantOp charSize = genConstantIndex( 1504 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); 1505 mlir::Value castedLen = 1506 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); 1507 auto byteOffset = 1508 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen); 1509 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); 1510 } else if (seqEleTy.isa<fir::RecordType>()) { 1511 // prevPtrOff = ; 1512 TODO(loc, "generate call to calculate size of PDT"); 1513 } else { 1514 fir::emitFatalError(loc, "unexpected dynamic type"); 1515 } 1516 } else { 1517 constRows = seqTy.getConstantRows(); 1518 } 1519 1520 const auto hasSubcomp = !xbox.subcomponent().empty(); 1521 const bool hasSubstr = !xbox.substr().empty(); 1522 /// Compute initial element stride that will be use to compute the step in 1523 /// each dimension. 1524 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize); 1525 if (hasSubcomp) { 1526 // We have a subcomponent. The step value needs to be the number of 1527 // bytes per element (which is a derived type). 1528 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1529 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1530 } else if (hasSubstr) { 1531 // We have a substring. The step value needs to be the number of bytes 1532 // per CHARACTER element. 
1533 auto charTy = seqEleTy.cast<fir::CharacterType>(); 1534 if (fir::hasDynamicSize(charTy)) { 1535 prevDimByteStride = prevPtrOff; 1536 } else { 1537 prevDimByteStride = genConstantIndex( 1538 loc, i64Ty, rewriter, 1539 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8); 1540 } 1541 } 1542 1543 // Process the array subspace arguments (shape, shift, etc.), if any, 1544 // translating everything to values in the descriptor wherever the entity 1545 // has a dynamic array dimension. 1546 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1547 mlir::Value extent = operands[shapeOffset]; 1548 mlir::Value outerExtent = extent; 1549 bool skipNext = false; 1550 if (hasSlice) { 1551 mlir::Value off = operands[sliceOffset]; 1552 mlir::Value adj = one; 1553 if (hasShift) 1554 adj = operands[shiftOffset]; 1555 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1556 if (constRows > 0) { 1557 gepArgs.push_back(ao); 1558 } else { 1559 auto dimOff = 1560 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1561 ptrOffset = 1562 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1563 } 1564 if (mlir::isa_and_nonnull<fir::UndefOp>( 1565 xbox.slice()[3 * di + 1].getDefiningOp())) { 1566 // This dimension contains a scalar expression in the array slice op. 1567 // The dimension is loop invariant, will be dropped, and will not 1568 // appear in the descriptor. 1569 skipNext = true; 1570 } 1571 } 1572 if (!skipNext) { 1573 // store extent 1574 if (hasSlice) 1575 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1576 operands[sliceOffset + 1], 1577 operands[sliceOffset + 2], zero, i64Ty); 1578 // Lower bound is normalized to 0 for BIND(C) interoperability. 1579 mlir::Value lb = zero; 1580 const bool isaPointerOrAllocatable = 1581 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1582 // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and 1583 // denormalized descriptors. 1584 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) 1585 lb = one; 1586 // If there is a shifted origin, and no fir.slice, and this is not 1587 // a normalized descriptor then use the value from the shift op as 1588 // the lower bound. 
1589 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1590 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1591 lb = operands[shiftOffset]; 1592 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1593 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1594 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1595 lb); 1596 } 1597 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1598 1599 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1600 1601 // store step (scaled by shaped extent) 1602 mlir::Value step = prevDimByteStride; 1603 if (hasSlice) 1604 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1605 operands[sliceOffset + 2]); 1606 dest = insertStride(rewriter, loc, dest, descIdx, step); 1607 ++descIdx; 1608 } 1609 1610 // compute the stride and offset for the next natural dimension 1611 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1612 loc, i64Ty, prevDimByteStride, outerExtent); 1613 if (constRows == 0) 1614 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1615 outerExtent); 1616 else 1617 --constRows; 1618 1619 // increment iterators 1620 ++shapeOffset; 1621 if (hasShift) 1622 ++shiftOffset; 1623 if (hasSlice) 1624 sliceOffset += 3; 1625 } 1626 if (hasSlice || hasSubcomp || hasSubstr) { 1627 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1628 args.append(gepArgs.rbegin(), gepArgs.rend()); 1629 if (hasSubcomp) { 1630 // For each field in the path add the offset to base via the args list. 1631 // In the most general case, some offsets must be computed since 1632 // they are not be known until runtime. 1633 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1634 fir::unwrapPassByRefType(xbox.memref().getType())))) 1635 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1636 args.append(operands.begin() + xbox.subcomponentOffset(), 1637 operands.begin() + xbox.subcomponentOffset() + 1638 xbox.subcomponent().size()); 1639 } 1640 base = 1641 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1642 if (hasSubstr) 1643 base = shiftSubstringBase(rewriter, loc, base, 1644 operands[xbox.substrOffset()]); 1645 } 1646 dest = insertBaseAddress(rewriter, loc, dest, base); 1647 if (isDerivedTypeWithLenParams(boxTy)) 1648 TODO(loc, "fir.embox codegen of derived with length parameters"); 1649 1650 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1651 rewriter.replaceOp(xbox, result); 1652 return mlir::success(); 1653 } 1654 1655 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1656 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1657 /// zero origin lower bound for interoperability with BIND(C). 1658 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1659 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1660 } 1661 }; 1662 1663 /// Create a new box given a box reference. 1664 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1665 using EmboxCommonConversion::EmboxCommonConversion; 1666 1667 mlir::LogicalResult 1668 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1669 mlir::ConversionPatternRewriter &rewriter) const override { 1670 mlir::Location loc = rebox.getLoc(); 1671 mlir::Type idxTy = lowerTy().indexType(); 1672 mlir::Value loweredBox = adaptor.getOperands()[0]; 1673 mlir::ValueRange operands = adaptor.getOperands(); 1674 1675 // Create new descriptor and fill its non-shape related data. 
1676 llvm::SmallVector<mlir::Value, 2> lenParams; 1677 mlir::Type inputEleTy = getInputEleTy(rebox); 1678 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1679 mlir::Value len = 1680 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1681 if (charTy.getFKind() != 1) { 1682 mlir::Value width = 1683 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1684 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1685 } 1686 lenParams.emplace_back(len); 1687 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1688 if (recTy.getNumLenParams() != 0) 1689 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1690 } 1691 auto [boxTy, dest, eleSize] = 1692 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1693 1694 // Read input extents, strides, and base address 1695 llvm::SmallVector<mlir::Value> inputExtents; 1696 llvm::SmallVector<mlir::Value> inputStrides; 1697 const unsigned inputRank = rebox.getRank(); 1698 for (unsigned i = 0; i < inputRank; ++i) { 1699 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1700 llvm::SmallVector<mlir::Value, 3> dimInfo = 1701 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1702 inputExtents.emplace_back(dimInfo[1]); 1703 inputStrides.emplace_back(dimInfo[2]); 1704 } 1705 1706 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1707 mlir::Value baseAddr = 1708 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1709 1710 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1711 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1712 operands, rewriter); 1713 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1714 operands, rewriter); 1715 } 1716 1717 private: 1718 /// Write resulting shape and base address in descriptor, and replace rebox 1719 /// op. 1720 mlir::LogicalResult 1721 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1722 mlir::ValueRange lbounds, mlir::ValueRange extents, 1723 mlir::ValueRange strides, 1724 mlir::ConversionPatternRewriter &rewriter) const { 1725 mlir::Location loc = rebox.getLoc(); 1726 mlir::Value zero = 1727 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1728 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1729 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1730 mlir::Value extent = std::get<0>(iter.value()); 1731 unsigned dim = iter.index(); 1732 mlir::Value lb = one; 1733 if (!lbounds.empty()) { 1734 lb = lbounds[dim]; 1735 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1736 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1737 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1738 }; 1739 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1740 dest = insertExtent(rewriter, loc, dest, dim, extent); 1741 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1742 } 1743 dest = insertBaseAddress(rewriter, loc, dest, base); 1744 mlir::Value result = 1745 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1746 rewriter.replaceOp(rebox, result); 1747 return mlir::success(); 1748 } 1749 1750 // Apply slice given the base address, extents and strides of the input box. 
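// Subcomponent and substring offsets are folded into the base address first;
// extents and strides are then recomputed for triplet dimensions only, since
// dimensions indexed by a scalar are dropped from the result box.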
1751 mlir::LogicalResult 1752 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1753 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1754 mlir::ValueRange operands, 1755 mlir::ConversionPatternRewriter &rewriter) const { 1756 mlir::Location loc = rebox.getLoc(); 1757 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1758 mlir::Type idxTy = lowerTy().indexType(); 1759 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1760 // Apply subcomponent and substring shift on base address. 1761 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1762 // Cast to inputEleTy* so that a GEP can be used. 1763 mlir::Type inputEleTy = getInputEleTy(rebox); 1764 auto llvmElePtrTy = 1765 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1766 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1767 1768 if (!rebox.subcomponent().empty()) { 1769 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1770 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1771 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1772 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1773 } 1774 if (!rebox.substr().empty()) 1775 base = shiftSubstringBase(rewriter, loc, base, 1776 operands[rebox.substrOffset()]); 1777 } 1778 1779 if (rebox.slice().empty()) 1780 // The array section is of the form array[%component][substring], keep 1781 // the input array extents and strides. 1782 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1783 inputExtents, inputStrides, rewriter); 1784 1785 // Strides from the fir.box are in bytes. 1786 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1787 1788 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1789 // and strides. 1790 llvm::SmallVector<mlir::Value> slicedExtents; 1791 llvm::SmallVector<mlir::Value> slicedStrides; 1792 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1793 const bool sliceHasOrigins = !rebox.shift().empty(); 1794 unsigned sliceOps = rebox.sliceOffset(); 1795 unsigned shiftOps = rebox.shiftOffset(); 1796 auto strideOps = inputStrides.begin(); 1797 const unsigned inputRank = inputStrides.size(); 1798 for (unsigned i = 0; i < inputRank; 1799 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1800 mlir::Value sliceLb = 1801 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1802 mlir::Value inputStride = *strideOps; // already idxTy 1803 // Apply origin shift: base += (lb-shift)*input_stride 1804 mlir::Value sliceOrigin = 1805 sliceHasOrigins 1806 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1807 : one; 1808 mlir::Value diff = 1809 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1810 mlir::Value offset = 1811 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1812 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1813 // Apply upper bound and step if this is a triplet. Otherwise, the 1814 // dimension is dropped and no extents/strides are computed. 
1815 mlir::Value upper = operands[sliceOps + 1]; 1816 const bool isTripletSlice = 1817 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1818 if (isTripletSlice) { 1819 mlir::Value step = 1820 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1821 // extent = ub-lb+step/step 1822 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1823 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1824 sliceUb, step, zero, idxTy); 1825 slicedExtents.emplace_back(extent); 1826 // stride = step*input_stride 1827 mlir::Value stride = 1828 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1829 slicedStrides.emplace_back(stride); 1830 } 1831 } 1832 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1833 slicedExtents, slicedStrides, rewriter); 1834 } 1835 1836 /// Apply a new shape to the data described by a box given the base address, 1837 /// extents and strides of the box. 1838 mlir::LogicalResult 1839 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1840 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1841 mlir::ValueRange operands, 1842 mlir::ConversionPatternRewriter &rewriter) const { 1843 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1844 operands.begin() + rebox.shiftOffset() + 1845 rebox.shift().size()}; 1846 if (rebox.shape().empty()) { 1847 // Only setting new lower bounds. 1848 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1849 inputStrides, rewriter); 1850 } 1851 1852 mlir::Location loc = rebox.getLoc(); 1853 // Strides from the fir.box are in bytes. 1854 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1855 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1856 1857 llvm::SmallVector<mlir::Value> newStrides; 1858 llvm::SmallVector<mlir::Value> newExtents; 1859 mlir::Type idxTy = lowerTy().indexType(); 1860 // First stride from input box is kept. The rest is assumed contiguous 1861 // (it is not possible to reshape otherwise). If the input is scalar, 1862 // which may be OK if all new extents are ones, the stride does not 1863 // matter, use one. 1864 mlir::Value stride = inputStrides.empty() 1865 ? genConstantIndex(loc, idxTy, rewriter, 1) 1866 : inputStrides[0]; 1867 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1868 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1869 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1870 newExtents.emplace_back(extent); 1871 newStrides.emplace_back(stride); 1872 // nextStride = extent * stride; 1873 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1874 } 1875 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1876 rewriter); 1877 } 1878 1879 /// Return scalar element type of the input box. 1880 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1881 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1882 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1883 return seqTy.getEleTy(); 1884 return ty; 1885 } 1886 }; 1887 1888 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1889 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1890 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1891 using FIROpConversion::FIROpConversion; 1892 1893 mlir::LogicalResult 1894 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1895 mlir::ConversionPatternRewriter &rewriter) const override { 1896 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1897 return mlir::failure(); 1898 } 1899 }; 1900 1901 // Code shared between insert_value and extract_value Ops. 1902 struct ValueOpCommon { 1903 // Translate the arguments pertaining to any multidimensional array to 1904 // row-major order for LLVM-IR. 1905 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1906 mlir::Type ty) { 1907 assert(ty && "type is null"); 1908 const auto end = attrs.size(); 1909 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1910 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1911 const auto dim = getDimension(seq); 1912 if (dim > 1) { 1913 auto ub = std::min(i + dim, end); 1914 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1915 i += dim - 1; 1916 } 1917 ty = getArrayElementType(seq); 1918 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1919 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1920 } else { 1921 llvm_unreachable("index into invalid type"); 1922 } 1923 } 1924 } 1925 1926 static llvm::SmallVector<mlir::Attribute> 1927 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1928 mlir::ArrayAttr arrAttr) { 1929 llvm::SmallVector<mlir::Attribute> attrs; 1930 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1931 if (i->isa<mlir::IntegerAttr>()) { 1932 attrs.push_back(*i); 1933 } else { 1934 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1935 ++i; 1936 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1937 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1938 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1939 } 1940 } 1941 return attrs; 1942 } 1943 1944 private: 1945 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1946 auto eleTy = ty.getElementType(); 1947 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1948 eleTy = arrTy.getElementType(); 1949 return eleTy; 1950 } 1951 }; 1952 1953 namespace { 1954 /// Extract a subobject value from an ssa-value of aggregate type 1955 struct ExtractValueOpConversion 1956 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1957 public ValueOpCommon { 1958 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1959 1960 mlir::LogicalResult 1961 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1962 mlir::ConversionPatternRewriter &rewriter) const override { 1963 mlir::ValueRange operands = adaptor.getOperands(); 1964 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1965 toRowMajor(attrs, operands[0].getType()); 1966 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1967 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1968 extractVal, ty, operands[0], position); 1969 return mlir::success(); 1970 } 1971 }; 1972 1973 /// InsertValue is the generalized instruction for the composition of new 1974 /// aggregate type values. 
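/// As with extract_value above, the coordinates are first resolved to integer
/// indices (field names are looked up in the record type) and reordered to
/// LLVM's row-major convention before emitting llvm.insertvalue.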
1975 struct InsertValueOpConversion 1976 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1977 public ValueOpCommon { 1978 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1979 1980 mlir::LogicalResult 1981 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1982 mlir::ConversionPatternRewriter &rewriter) const override { 1983 mlir::ValueRange operands = adaptor.getOperands(); 1984 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1985 toRowMajor(attrs, operands[0].getType()); 1986 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1987 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1988 insertVal, ty, operands[0], operands[1], position); 1989 return mlir::success(); 1990 } 1991 }; 1992 1993 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1994 struct InsertOnRangeOpConversion 1995 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1996 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1997 1998 // Increments an array of subscripts in a row major fasion. 1999 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 2000 llvm::SmallVector<uint64_t> &subscripts) const { 2001 for (size_t i = dims.size(); i > 0; --i) { 2002 if (++subscripts[i - 1] < dims[i - 1]) { 2003 return; 2004 } 2005 subscripts[i - 1] = 0; 2006 } 2007 } 2008 2009 mlir::LogicalResult 2010 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2011 mlir::ConversionPatternRewriter &rewriter) const override { 2012 2013 llvm::SmallVector<uint64_t> dims; 2014 auto type = adaptor.getOperands()[0].getType(); 2015 2016 // Iteratively extract the array dimensions from the type. 2017 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2018 dims.push_back(t.getNumElements()); 2019 type = t.getElementType(); 2020 } 2021 2022 llvm::SmallVector<std::uint64_t> lBounds; 2023 llvm::SmallVector<std::uint64_t> uBounds; 2024 2025 // Unzip the upper and lower bound and convert to a row major format. 2026 mlir::DenseIntElementsAttr coor = range.getCoor(); 2027 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2028 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2029 uBounds.push_back(*i++); 2030 lBounds.push_back(*i); 2031 } 2032 2033 auto &subscripts = lBounds; 2034 auto loc = range.getLoc(); 2035 mlir::Value lastOp = adaptor.getOperands()[0]; 2036 mlir::Value insertVal = adaptor.getOperands()[1]; 2037 2038 auto i64Ty = rewriter.getI64Type(); 2039 while (subscripts != uBounds) { 2040 // Convert uint64_t's to Attribute's. 2041 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2042 for (const auto &subscript : subscripts) 2043 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2044 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2045 loc, ty, lastOp, insertVal, 2046 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2047 2048 incrementSubscripts(dims, subscripts); 2049 } 2050 2051 // Convert uint64_t's to Attribute's. 
2052 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2053 for (const auto &subscript : subscripts) 2054 subscriptAttrs.push_back( 2055 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2056 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2057 2058 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2059 range, ty, lastOp, insertVal, 2060 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2061 2062 return mlir::success(); 2063 } 2064 }; 2065 } // namespace 2066 2067 namespace { 2068 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2069 /// shifted etc. array. 2070 /// (See the static restriction on coordinate_of.) array_coor determines the 2071 /// coordinate (location) of a specific element. 2072 struct XArrayCoorOpConversion 2073 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2074 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2075 2076 mlir::LogicalResult 2077 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2078 mlir::ConversionPatternRewriter &rewriter) const override { 2079 auto loc = coor.getLoc(); 2080 mlir::ValueRange operands = adaptor.getOperands(); 2081 unsigned rank = coor.getRank(); 2082 assert(coor.indices().size() == rank); 2083 assert(coor.shape().empty() || coor.shape().size() == rank); 2084 assert(coor.shift().empty() || coor.shift().size() == rank); 2085 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2086 mlir::Type idxTy = lowerTy().indexType(); 2087 unsigned indexOffset = coor.indicesOffset(); 2088 unsigned shapeOffset = coor.shapeOffset(); 2089 unsigned shiftOffset = coor.shiftOffset(); 2090 unsigned sliceOffset = coor.sliceOffset(); 2091 auto sliceOps = coor.slice().begin(); 2092 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2093 mlir::Value prevExt = one; 2094 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2095 mlir::Value offset = zero; 2096 const bool isShifted = !coor.shift().empty(); 2097 const bool isSliced = !coor.slice().empty(); 2098 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2099 2100 // For each dimension of the array, generate the offset calculation. 2101 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset, 2102 ++shiftOffset, sliceOffset += 3, sliceOps += 3) { 2103 mlir::Value index = 2104 integerCast(loc, rewriter, idxTy, operands[indexOffset]); 2105 mlir::Value lb = 2106 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset]) 2107 : one; 2108 mlir::Value step = one; 2109 bool normalSlice = isSliced; 2110 // Compute zero based index in dimension i of the element, applying 2111 // potential triplets and lower bounds. 2112 if (isSliced) { 2113 mlir::Value originalUb = *(sliceOps + 1); 2114 normalSlice = 2115 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp()); 2116 if (normalSlice) 2117 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]); 2118 } 2119 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2120 mlir::Value diff = 2121 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2122 if (normalSlice) { 2123 mlir::Value sliceLb = 2124 integerCast(loc, rewriter, idxTy, operands[sliceOffset]); 2125 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2126 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2127 } 2128 // Update the offset given the stride and the zero based index `diff` 2129 // that was just computed. 
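// For a boxed base the byte stride is read from the descriptor; for an
// unboxed base the array is contiguous, so the stride is accumulated in
// element units from the shape operands.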
2130 if (baseIsBoxed) { 2131 // Use stride in bytes from the descriptor. 2132 mlir::Value stride = loadStrideFromBox(loc, operands[0], i, rewriter); 2133 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2134 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2135 } else { 2136 // Use stride computed at last iteration. 2137 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2138 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2139 // Compute next stride assuming contiguity of the base array 2140 // (in element number). 2141 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]); 2142 prevExt = 2143 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2144 } 2145 } 2146 2147 // Add computed offset to the base address. 2148 if (baseIsBoxed) { 2149 // Working with byte offsets. The base address is read from the fir.box. 2150 // and need to be casted to i8* to do the pointer arithmetic. 2151 mlir::Type baseTy = getBaseAddrTypeFromBox(operands[0].getType()); 2152 mlir::Value base = 2153 loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter); 2154 mlir::Type voidPtrTy = getVoidPtrType(); 2155 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2156 llvm::SmallVector<mlir::Value> args{offset}; 2157 auto addr = 2158 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2159 if (coor.subcomponent().empty()) { 2160 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, addr); 2161 return mlir::success(); 2162 } 2163 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2164 args.clear(); 2165 args.push_back(zero); 2166 if (!coor.lenParams().empty()) { 2167 // If type parameters are present, then we don't want to use a GEPOp 2168 // as below, as the LLVM struct type cannot be statically defined. 2169 TODO(loc, "derived type with type parameters"); 2170 } 2171 // TODO: array offset subcomponents must be converted to LLVM's 2172 // row-major layout here. 2173 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2174 args.push_back(operands[i]); 2175 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, casted, args); 2176 return mlir::success(); 2177 } 2178 2179 // The array was not boxed, so it must be contiguous. offset is therefore an 2180 // element offset and the base type is kept in the GEP unless the element 2181 // type size is itself dynamic. 2182 mlir::Value base; 2183 if (coor.subcomponent().empty()) { 2184 // No subcomponent. 2185 if (!coor.lenParams().empty()) { 2186 // Type parameters. Adjust element size explicitly. 2187 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2188 assert(eleTy && "result must be a reference-like type"); 2189 if (fir::characterWithDynamicLen(eleTy)) { 2190 assert(coor.lenParams().size() == 1); 2191 auto length = integerCast(loc, rewriter, idxTy, 2192 operands[coor.lenParamsOffset()]); 2193 offset = 2194 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, length); 2195 2196 } else { 2197 TODO(loc, "compute size of derived type with type parameters"); 2198 } 2199 } 2200 // Cast the base address to a pointer to T. 2201 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, operands[0]); 2202 } else { 2203 // Operand #0 must have a pointer type. For subcomponent slicing, we 2204 // want to cast away the array type and have a plain struct type. 
2205 mlir::Type ty0 = operands[0].getType(); 2206 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2207 assert(ptrTy && "expected pointer type"); 2208 mlir::Type eleTy = ptrTy.getElementType(); 2209 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2210 eleTy = arrTy.getElementType(); 2211 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2212 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, operands[0]); 2213 } 2214 llvm::SmallVector<mlir::Value> args = {offset}; 2215 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2216 args.push_back(operands[i]); 2217 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2218 return mlir::success(); 2219 } 2220 }; 2221 } // namespace 2222 2223 /// Convert to (memory) reference to a reference to a subobject. 2224 /// The coordinate_of op is a Swiss army knife operation that can be used on 2225 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2226 /// With unboxed arrays, there is the restriction that the array have a static 2227 /// shape in all but the last column. 2228 struct CoordinateOpConversion 2229 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2230 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2231 2232 mlir::LogicalResult 2233 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2234 mlir::ConversionPatternRewriter &rewriter) const override { 2235 mlir::ValueRange operands = adaptor.getOperands(); 2236 2237 mlir::Location loc = coor.getLoc(); 2238 mlir::Value base = operands[0]; 2239 mlir::Type baseObjectTy = coor.getBaseType(); 2240 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2241 assert(objectTy && "fir.coordinate_of expects a reference type"); 2242 2243 // Complex type - basically, extract the real or imaginary part 2244 if (fir::isa_complex(objectTy)) { 2245 mlir::LLVM::ConstantOp c0 = 2246 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2247 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2248 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2249 rewriter.replaceOp(coor, gep); 2250 return mlir::success(); 2251 } 2252 2253 // Boxed type - get the base pointer from the box 2254 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2255 return doRewriteBox(coor, ty, operands, loc, rewriter); 2256 2257 // Reference, pointer or a heap type 2258 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2259 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2260 2261 return rewriter.notifyMatchFailure( 2262 coor, "fir.coordinate_of base operand has unsupported type"); 2263 } 2264 2265 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2266 return fir::hasDynamicSize(ty) 2267 ? 
op.getDefiningOp() 2268 ->getAttrOfType<mlir::IntegerAttr>("field") 2269 .getInt() 2270 : getIntValue(op); 2271 } 2272 2273 static int64_t getIntValue(mlir::Value val) { 2274 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2275 mlir::Operation *defop = val.getDefiningOp(); 2276 2277 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2278 return constOp.value(); 2279 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2280 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2281 return attr.getValue().getSExtValue(); 2282 fir::emitFatalError(val.getLoc(), "must be a constant"); 2283 } 2284 2285 static bool hasSubDimensions(mlir::Type type) { 2286 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2287 } 2288 2289 /// Check whether this form of `!fir.coordinate_of` is supported. These 2290 /// additional checks are required, because we are not yet able to convert 2291 /// all valid forms of `!fir.coordinate_of`. 2292 /// TODO: Either implement the unsupported cases or extend the verifier 2293 /// in FIROps.cpp instead. 2294 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2295 const std::size_t numOfCoors = coors.size(); 2296 std::size_t i = 0; 2297 bool subEle = false; 2298 bool ptrEle = false; 2299 for (; i < numOfCoors; ++i) { 2300 mlir::Value nxtOpnd = coors[i]; 2301 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2302 subEle = true; 2303 i += arrTy.getDimension() - 1; 2304 type = arrTy.getEleTy(); 2305 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2306 subEle = true; 2307 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2308 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2309 subEle = true; 2310 type = tupTy.getType(getIntValue(nxtOpnd)); 2311 } else { 2312 ptrEle = true; 2313 } 2314 } 2315 if (ptrEle) 2316 return (!subEle) && (numOfCoors == 1); 2317 return subEle && (i >= numOfCoors); 2318 } 2319 2320 /// Walk the abstract memory layout and determine if the path traverses any 2321 /// array types with unknown shape. Return true iff all the array types have a 2322 /// constant shape along the path. 2323 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2324 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2325 mlir::Value nxtOpnd = coors[i]; 2326 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2327 if (fir::sequenceWithNonConstantShape(arrTy)) 2328 return false; 2329 i += arrTy.getDimension() - 1; 2330 type = arrTy.getEleTy(); 2331 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2332 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2333 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2334 type = strTy.getType(getIntValue(nxtOpnd)); 2335 } else { 2336 return true; 2337 } 2338 } 2339 return true; 2340 } 2341 2342 private: 2343 mlir::LogicalResult 2344 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2345 mlir::Location loc, 2346 mlir::ConversionPatternRewriter &rewriter) const { 2347 mlir::Type boxObjTy = coor.getBaseType(); 2348 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2349 2350 mlir::Value boxBaseAddr = operands[0]; 2351 2352 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2353 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>>
2354 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2355 // %addr = coordinate_of %box, %lenp
2356 if (coor.getNumOperands() == 2) {
2357 mlir::Operation *coordinateDef =
2358 (*coor.getCoor().begin()).getDefiningOp();
2359 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2360 TODO(loc,
2361 "fir.coordinate_of - fir.len_param_index is not supported yet");
2362 }
2363
2364 // 2. GENERAL CASE:
2365 // 2.1. (`fir.array`)
2366 // %box = ... : !fir.box<!fir.array<?xU>>
2367 // %idx = ... : index
2368 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2369 // 2.2 (`fir.derived`)
2370 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2371 // %idx = ... : i32
2372 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2373 // 2.3 (`fir.derived` inside `fir.array`)
2374 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
2375 // %idx1 = ... : index   %idx2 = ... : i32
2376 // %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2377 // 2.4. TODO: Either document or disable any other case that the following
2378 // implementation might convert.
2379 mlir::LLVM::ConstantOp c0 =
2380 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2381 mlir::Value resultAddr =
2382 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
2383 boxBaseAddr, rewriter);
2384 // Component Type
2385 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2386 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
2387
2388 for (unsigned i = 1, last = operands.size(); i < last; ++i) {
2389 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2390 if (i != 1)
2391 TODO(loc, "fir.array nested inside other array and/or derived type");
2392 // Apply byte strides from the box. Ignore the lower bound from the box
2393 // since fir.coordinate_of indexes are zero based. Lowering takes care
2394 // of lower bound aspects. This accounts for both dynamically sized
2395 // types and non-contiguous arrays.
2396 auto idxTy = lowerTy().indexType(); 2397 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2398 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2399 index < lastIndex; ++index) { 2400 mlir::Value stride = 2401 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2402 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2403 operands[index], stride); 2404 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2405 } 2406 auto voidPtrBase = 2407 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2408 llvm::SmallVector<mlir::Value> args = {off}; 2409 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2410 voidPtrBase, args); 2411 i += arrTy.getDimension() - 1; 2412 cpnTy = arrTy.getEleTy(); 2413 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2414 auto recRefTy = 2415 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2416 mlir::Value nxtOpnd = operands[i]; 2417 auto memObj = 2418 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2419 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2420 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2421 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2422 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2423 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2424 args); 2425 resultAddr = 2426 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2427 } else { 2428 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2429 } 2430 } 2431 2432 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2433 return mlir::success(); 2434 } 2435 2436 mlir::LogicalResult 2437 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2438 mlir::ValueRange operands, mlir::Location loc, 2439 mlir::ConversionPatternRewriter &rewriter) const { 2440 mlir::Type baseObjectTy = coor.getBaseType(); 2441 2442 // Component Type 2443 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2444 bool hasSubdimension = hasSubDimensions(cpnTy); 2445 bool columnIsDeferred = !hasSubdimension; 2446 2447 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2448 TODO(loc, "unsupported combination of coordinate operands"); 2449 2450 const bool hasKnownShape = 2451 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2452 2453 // If only the column is `?`, then we can simply place the column value in 2454 // the 0-th GEP position. 
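// That is, when every dimension except the last has a compile-time constant
// extent, the column stays deferred and the GEP can still be formed.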
2455 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2456 if (!hasKnownShape) { 2457 const unsigned sz = arrTy.getDimension(); 2458 if (arraysHaveKnownShape(arrTy.getEleTy(), 2459 operands.drop_front(1 + sz))) { 2460 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2461 bool allConst = true; 2462 for (unsigned i = 0; i < sz - 1; ++i) { 2463 if (shape[i] < 0) { 2464 allConst = false; 2465 break; 2466 } 2467 } 2468 if (allConst) 2469 columnIsDeferred = true; 2470 } 2471 } 2472 } 2473 2474 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2475 return mlir::emitError( 2476 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2477 2478 if (hasKnownShape || columnIsDeferred) { 2479 llvm::SmallVector<mlir::Value> offs; 2480 if (hasKnownShape && hasSubdimension) { 2481 mlir::LLVM::ConstantOp c0 = 2482 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2483 offs.push_back(c0); 2484 } 2485 llvm::Optional<int> dims; 2486 llvm::SmallVector<mlir::Value> arrIdx; 2487 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2488 mlir::Value nxtOpnd = operands[i]; 2489 2490 if (!cpnTy) 2491 return mlir::emitError(loc, "invalid coordinate/check failed"); 2492 2493 // check if the i-th coordinate relates to an array 2494 if (dims) { 2495 arrIdx.push_back(nxtOpnd); 2496 int dimsLeft = *dims; 2497 if (dimsLeft > 1) { 2498 dims = dimsLeft - 1; 2499 continue; 2500 } 2501 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2502 // append array range in reverse (FIR arrays are column-major) 2503 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2504 arrIdx.clear(); 2505 dims.reset(); 2506 continue; 2507 } 2508 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2509 int d = arrTy.getDimension() - 1; 2510 if (d > 0) { 2511 dims = d; 2512 arrIdx.push_back(nxtOpnd); 2513 continue; 2514 } 2515 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2516 offs.push_back(nxtOpnd); 2517 continue; 2518 } 2519 2520 // check if the i-th coordinate relates to a field 2521 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2522 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2523 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2524 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2525 else 2526 cpnTy = nullptr; 2527 2528 offs.push_back(nxtOpnd); 2529 } 2530 if (dims) 2531 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2532 mlir::Value base = operands[0]; 2533 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2534 rewriter.replaceOp(coor, retval); 2535 return mlir::success(); 2536 } 2537 2538 return mlir::emitError( 2539 loc, "fir.coordinate_of base operand has unsupported type"); 2540 } 2541 }; 2542 2543 /// Convert `fir.field_index`. The conversion depends on whether the size of 2544 /// the record is static or dynamic. 2545 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2546 using FIROpConversion::FIROpConversion; 2547 2548 // NB: most field references should be resolved by this point 2549 mlir::LogicalResult 2550 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2551 mlir::ConversionPatternRewriter &rewriter) const override { 2552 auto recTy = field.getOnType().cast<fir::RecordType>(); 2553 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2554 2555 if (!fir::hasDynamicSize(recTy)) { 2556 // Derived type has compile-time constant layout. Return index of the 2557 // component type in the parent type (to be used in GEP). 
2558 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2559 field.getLoc(), rewriter, index)});
2560 return mlir::success();
2561 }
2562
2563 // Derived type has a dynamic layout: call the compiler-generated function
2564 // to determine the byte offset of the field at runtime.
2565 // This returns a non-constant value.
2566 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2567 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2568 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2569 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
2570 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2571 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2572 field, lowerTy().offsetType(), adaptor.getOperands(),
2573 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2574 return mlir::success();
2575 }
2576
2577 // Reconstruct the name of the compiler-generated method that calculates
2578 // the field offset.
2579 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2580 llvm::StringRef field) {
2581 return recTy.getName().str() + "P." + field.str() + ".offset";
2582 }
2583 };
2584
2585 /// Convert `fir.end`
2586 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2587 using FIROpConversion::FIROpConversion;
2588
2589 mlir::LogicalResult
2590 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2591 mlir::ConversionPatternRewriter &rewriter) const override {
2592 TODO(firEnd.getLoc(), "fir.end codegen");
2593 return mlir::failure();
2594 }
2595 };
2596
2597 /// Lower `fir.gentypedesc` to a global constant.
2598 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2599 using FIROpConversion::FIROpConversion;
2600
2601 mlir::LogicalResult
2602 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2603 mlir::ConversionPatternRewriter &rewriter) const override {
2604 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2605 return mlir::failure();
2606 }
2607 };
2608
2609 /// Lower `fir.has_value` operation to `llvm.return` operation.
2610 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2611 using FIROpConversion::FIROpConversion;
2612
2613 mlir::LogicalResult
2614 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2615 mlir::ConversionPatternRewriter &rewriter) const override {
2616 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
2617 adaptor.getOperands());
2618 return mlir::success();
2619 }
2620 };
2621
2622 /// Lower `fir.global` operation to `llvm.global` operation.
2623 /// `fir.insert_on_range` operations are replaced with a constant dense
2624 /// attribute if they are applied on the full range.
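/// The constant feeding such an insert (possibly through a fir.convert) is
/// turned into a dense elements attribute spanning the whole sequence type.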
2625 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2626 using FIROpConversion::FIROpConversion; 2627 2628 mlir::LogicalResult 2629 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2630 mlir::ConversionPatternRewriter &rewriter) const override { 2631 auto tyAttr = convertType(global.getType()); 2632 if (global.getType().isa<fir::BoxType>()) 2633 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2634 auto loc = global.getLoc(); 2635 mlir::Attribute initAttr; 2636 if (global.getInitVal()) 2637 initAttr = global.getInitVal().getValue(); 2638 auto linkage = convertLinkage(global.getLinkName()); 2639 auto isConst = global.getConstant().hasValue(); 2640 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2641 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2642 auto &gr = g.getInitializerRegion(); 2643 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2644 if (!gr.empty()) { 2645 // Replace insert_on_range with a constant dense attribute if the 2646 // initialization is on the full range. 2647 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2648 for (auto insertOp : insertOnRangeOps) { 2649 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2650 auto seqTyAttr = convertType(insertOp.getType()); 2651 auto *op = insertOp.getVal().getDefiningOp(); 2652 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2653 if (!constant) { 2654 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2655 if (!convertOp) 2656 continue; 2657 constant = mlir::cast<mlir::arith::ConstantOp>( 2658 convertOp.getValue().getDefiningOp()); 2659 } 2660 mlir::Type vecType = mlir::VectorType::get( 2661 insertOp.getType().getShape(), constant.getType()); 2662 auto denseAttr = mlir::DenseElementsAttr::get( 2663 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2664 rewriter.setInsertionPointAfter(insertOp); 2665 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2666 insertOp, seqTyAttr, denseAttr); 2667 } 2668 } 2669 } 2670 rewriter.eraseOp(global); 2671 return mlir::success(); 2672 } 2673 2674 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2675 fir::SequenceType seqTy) const { 2676 auto extents = seqTy.getShape(); 2677 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2678 return false; 2679 auto cur_index = indexes.value_begin<int64_t>(); 2680 for (unsigned i = 0; i < indexes.size(); i += 2) { 2681 if (*(cur_index++) != 0) 2682 return false; 2683 if (*(cur_index++) != extents[i / 2] - 1) 2684 return false; 2685 } 2686 return true; 2687 } 2688 2689 // TODO: String comparaison should be avoided. Replace linkName with an 2690 // enumeration. 
2691 mlir::LLVM::Linkage
2692 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
2693 if (optLinkage.hasValue()) {
2694 auto name = optLinkage.getValue();
2695 if (name == "internal")
2696 return mlir::LLVM::Linkage::Internal;
2697 if (name == "linkonce")
2698 return mlir::LLVM::Linkage::Linkonce;
2699 if (name == "linkonce_odr")
2700 return mlir::LLVM::Linkage::LinkonceODR;
2701 if (name == "common")
2702 return mlir::LLVM::Linkage::Common;
2703 if (name == "weak")
2704 return mlir::LLVM::Linkage::Weak;
2705 }
2706 return mlir::LLVM::Linkage::External;
2707 }
2708 };
2709
2710 /// `fir.load` --> `llvm.load`
2711 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
2712 using FIROpConversion::FIROpConversion;
2713
2714 mlir::LogicalResult
2715 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
2716 mlir::ConversionPatternRewriter &rewriter) const override {
2717 // fir.box is a special case because it is considered an SSA value in FIR,
2718 // but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> and
2719 // fir.box end up being the same LLVM type, and loading a fir.ref<fir.box>
2720 // is actually a no-op in LLVM.
2721 if (load.getType().isa<fir::BoxType>()) {
2722 rewriter.replaceOp(load, adaptor.getOperands()[0]);
2723 } else {
2724 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
2725 load, convertType(load.getType()), adaptor.getOperands(),
2726 load->getAttrs());
2727 }
2728 return mlir::success();
2729 }
2730 };
2731
2732 /// Lower `fir.no_reassoc` to LLVM IR dialect.
2733 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
2734 /// math flags?
2735 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
2736 using FIROpConversion::FIROpConversion;
2737
2738 mlir::LogicalResult
2739 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
2740 mlir::ConversionPatternRewriter &rewriter) const override {
2741 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
2742 return mlir::success();
2743 }
2744 };
2745
2746 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
2747 llvm::Optional<mlir::ValueRange> destOps,
2748 mlir::ConversionPatternRewriter &rewriter,
2749 mlir::Block *newBlock) {
2750 if (destOps.hasValue())
2751 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
2752 newBlock, mlir::ValueRange());
2753 else
2754 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
2755 }
2756
2757 template <typename A, typename B>
2758 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
2759 mlir::ConversionPatternRewriter &rewriter) {
2760 if (destOps.hasValue())
2761 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
2762 dest);
2763 else
2764 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
2765 }
2766
2767 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
2768 mlir::Block *dest,
2769 llvm::Optional<mlir::ValueRange> destOps,
2770 mlir::ConversionPatternRewriter &rewriter) {
2771 auto *thisBlock = rewriter.getInsertionBlock();
2772 auto *newBlock = createBlock(rewriter, dest);
2773 rewriter.setInsertionPointToEnd(thisBlock);
2774 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
2775 rewriter.setInsertionPointToEnd(newBlock);
2776 }
2777
2778 /// Conversion of `fir.select_case`
2779 ///
2780 /// The `fir.select_case` operation is converted to an if-then-else ladder.

/// Lower `fir.no_reassoc` to LLVM IR dialect.
/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
/// math flags?
struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
    return mlir::success();
  }
};

static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
                        llvm::Optional<mlir::ValueRange> destOps,
                        mlir::ConversionPatternRewriter &rewriter,
                        mlir::Block *newBlock) {
  if (destOps.hasValue())
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
                                          newBlock, mlir::ValueRange());
  else
    rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
}

template <typename A, typename B>
static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
                    mlir::ConversionPatternRewriter &rewriter) {
  if (destOps.hasValue())
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
                                                  dest);
  else
    rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
}

static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
                              mlir::Block *dest,
                              llvm::Optional<mlir::ValueRange> destOps,
                              mlir::ConversionPatternRewriter &rewriter) {
  auto *thisBlock = rewriter.getInsertionBlock();
  auto *newBlock = createBlock(rewriter, dest);
  rewriter.setInsertionPointToEnd(thisBlock);
  genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
  rewriter.setInsertionPointToEnd(newBlock);
}

/// Conversion of `fir.select_case`
///
/// The `fir.select_case` operation is converted to an if-then-else ladder.
/// Depending on the case condition type, one or several comparisons and
/// conditional branches can be generated.
///
/// A point value case such as `case(4)`, a lower bound case such as
/// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
/// simple comparison between the selector value and the constant value in the
/// case. The block associated with the case condition is then executed if
/// the comparison succeeds; otherwise, it branches to the next block with the
/// comparison for the next case condition.
///
/// A closed interval case condition such as `case(7:10)` is converted with a
/// first comparison and conditional branch for the lower bound. If
/// successful, it branches to a second block with the comparison for the
/// upper bound in the same case condition.
///
/// TODO: lowering of CHARACTER type cases is not handled yet.
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps.value().begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};
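
// Illustrative sketch of the ladder (assumed block names, not verified output):
// with selector %sel, a `case (4)` arm yields
//   %c = llvm.icmp "eq" %sel, %c4 : i32
//   llvm.cond_br %c, ^case_blk, ^next_cmp
// a `case (5:)` arm compares with "sle" (%c5 <= %sel), a `case (:3)` arm with
// "sle" (%sel <= %c3), a `case (7:10)` arm chains two such comparisons through
// an intermediate block, and the trailing `unit` (default) arm becomes an
// unconditional llvm.br.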

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps.hasValue() ? *destOps
                                                        : mlir::ValueRange{});
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
                                                    selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
}
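
// Illustrative sketch (assumed syntax): `fir.select %sel : index
// [1, ^bb1(%a : i32), 2, ^bb2, unit, ^bb3]` becomes an llvm.switch on the
// selector (truncated to i32 when needed) with case values {1, 2}, the
// corresponding destinations and operands, and ^bb3 as the default.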

/// conversion of fir::SelectOp to llvm.switch (see selectMatchAndRewrite)
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// conversion of fir::SelectRankOp to llvm.switch (see selectMatchAndRewrite)
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return mlir::failure();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.getValue().getType().isa<fir::BoxType>()) {
      // A fir.box value is actually in memory; load it first before storing it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return mlir::success();
  }
};
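
// For example (illustrative): `fir.store %val to %ref : !fir.ref<i32>` becomes
// `llvm.store %val, %ref : !llvm.ptr<i32>`; storing a !fir.box<...> value first
// loads the descriptor through the pointer it is lowered to and then stores it
// into the destination.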

namespace {

/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return mlir::success();
  }
};
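
// For example (illustrative, assuming the boxchar lowers to a (buffer, length)
// struct such as !llvm.struct<(ptr<i8>, i64)>):
//   %buf = llvm.extractvalue %boxchar[0] : !llvm.struct<(ptr<i8>, i64)>
//   %len = llvm.extractvalue %boxchar[1] : !llvm.struct<(ptr<i8>, i64)>
// with %len then cast to the requested length type by integerCast.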

/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return mlir::failure();
  }
};

/// convert to LLVM IR dialect `undef`
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return mlir::success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
    } else {
      // TODO: create ConstantAggregateZero for FIR aggregate/array types.
      return rewriter.notifyMatchFailure(
          zero,
          "conversion of fir.zero with aggregate type not implemented yet");
    }
    return mlir::success();
  }
};
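
// For example (illustrative): `fir.zero_bits !fir.ref<i32>` lowers to
// llvm.mlir.null, `fir.zero_bits i32` to llvm.mlir.constant(0 : i32), and
// `fir.zero_bits f32` to llvm.mlir.constant(0.0 : f32); aggregate types are
// still rejected above.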

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return mlir::success();
  }
};

/// `fir.is_present` -->
/// ```
///  %0 = llvm.mlir.constant(0 : i64)
///  %1 = llvm.ptrtoint %arg
///  %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Location loc = isPresent.getLoc();
    auto ptr = adaptor.getOperands()[0];

    if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
      auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());

      mlir::Type ty = structTy.getBody()[0];
      mlir::MLIRContext *ctx = isPresent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
    }
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
    auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);

    return mlir::success();
  }
};

/// Create a value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return mlir::success();
  }
};

//
// Primitive operations on Complex types
//
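
// Complex values are lowered to a two-field LLVM struct {real, imaginary}, so,
// for example, (1.0, 2.0) + (3.0, 4.0) extracts both component pairs, adds them
// component-wise, and rebuilds {4.0, 6.0} with two insertvalue operations.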

/// Generate inline code for complex addition/subtraction
template <typename LLVMOP, typename OPTY>
static mlir::LLVM::InsertValueOp
complexSum(OPTY sumop, mlir::ValueRange opnds,
           mlir::ConversionPatternRewriter &rewriter,
           fir::LLVMTypeConverter &lowering) {
  mlir::Value a = opnds[0];
  mlir::Value b = opnds[1];
  auto loc = sumop.getLoc();
  auto ctx = sumop.getContext();
  auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
  auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
  mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
  mlir::Type ty = lowering.convertType(sumop.getType());
  auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
  auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
  auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
  auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
  auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
  auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
  auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
  auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
  return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
}
} // namespace

namespace {
struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) + (x' + iy')
    // result: (x + x') + i(y + y')
    auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(addc, r.getResult());
    return mlir::success();
  }
};

struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) - (x' + iy')
    // result: (x - x') + i(y - y')
    auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(subc, r.getResult());
    return mlir::success();
  }
};

/// Inlined complex multiply
struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __muldc3 instead?
    // given: (x + iy) * (x' + iy')
    // result: (xx' - yy') + i(xy' + yx')
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = mulc.getLoc();
    auto *ctx = mulc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
    mlir::Type ty = convertType(mulc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(mulc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex division
struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __divdc3 instead?
    // Just generate inline code for now.
    // given: (x + iy) / (x' + iy')
    // result: ((xx' + yy') / d) + i((yx' - xy') / d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
    return mlir::success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is used by other operations, and they should not have any
/// remaining uses at this point.
/// These operations are normally dead after the pre-codegen pass.
template <typename FromOp>
struct MustBeDeadConversion : public FIROpConversion<FromOp> {
  explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering,
                                const fir::FIRToLLVMPassOptions &options)
      : FIROpConversion<FromOp>(lowering, options) {}
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    if (!op->getUses().empty())
      return rewriter.notifyMatchFailure(op, "op must be dead");
    rewriter.eraseOp(op);
    return mlir::success();
  }
};
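
// For instance (illustrative), a `fir.shape` typically only feeds operations
// such as `fir.embox` or `fir.array_coor`; the pre-codegen rewrite folds that
// shape information into the extended codegen operations, so the `fir.shape`
// itself is expected to have no remaining uses by the time this pass runs.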

struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

} // namespace

namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. An
/// MLIR pass is used to lower residual Std dialect to LLVM IR dialect.
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  FIRToLLVMLowering() = default;
  FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    if (!forcedTargetTriple.empty())
      fir::setTargetTriple(mod, forcedTargetTriple);

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::RewritePatternSet pattern(context);
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
        GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
        NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter,
                                                                  options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    // Convert math-like dialect operations, which can be produced
    // when late math lowering mode is used, into llvm dialect.
    mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; for those
    // which contain regions, it is legal if the regions contain only the
    // LLVM dialect. Add the OpenMP dialect as a legal dialect for conversion
    // and legalize conversion of OpenMP operations without regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // required NOPs for applying a full conversion
    target.addLegalOp<mlir::ModuleOp>();

    // apply the patterns
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  llvm::raw_ostream &output;
  fir::LLVMIRLoweringPrinter printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}
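
// Typical usage (illustrative sketch, not part of this file's API): a driver
// would schedule the pass created above on a pass manager, for example:
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     ... report the failure ...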