//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
#include "mlir/Conversion/MathToLibm/MathToLibm.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types
  /// to the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
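  /// A lowered `fir.box` is an LLVM struct modeled after CFI_cdesc_t from
  /// flang/ISO_Fortran_binding.h, so the read is roughly (illustrative; the
  /// exact struct body depends on the boxed type and rank):
  ///   %p = llvm.getelementptr %box[%c0, %cValuePos] : ... -> !llvm.ptr<resultTy>
  ///   %v = llvm.load %p : !llvm.ptr<resultTy>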
126 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 127 mlir::Type resultTy, 128 mlir::ConversionPatternRewriter &rewriter, 129 unsigned boxValue) const { 130 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 131 mlir::LLVM::ConstantOp cValuePos = 132 genConstantOffset(loc, rewriter, boxValue); 133 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 134 auto p = rewriter.create<mlir::LLVM::GEPOp>( 135 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 136 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 137 } 138 139 /// Method to construct code sequence to get the triple for dimension `dim` 140 /// from a box. 141 llvm::SmallVector<mlir::Value, 3> 142 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, 143 mlir::Value box, mlir::Value dim, 144 mlir::ConversionPatternRewriter &rewriter) const { 145 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 146 mlir::LLVM::ConstantOp cDims = 147 genConstantOffset(loc, rewriter, kDimsPosInBox); 148 mlir::LLVM::LoadOp l0 = 149 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 150 mlir::LLVM::LoadOp l1 = 151 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 152 mlir::LLVM::LoadOp l2 = 153 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 154 return {l0.getResult(), l1.getResult(), l2.getResult()}; 155 } 156 157 mlir::LLVM::LoadOp 158 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 159 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 160 mlir::Type ty, 161 mlir::ConversionPatternRewriter &rewriter) const { 162 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 163 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 164 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 165 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 166 } 167 168 mlir::Value 169 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 170 mlir::ConversionPatternRewriter &rewriter) const { 171 auto idxTy = lowerTy().indexType(); 172 auto c0 = genConstantOffset(loc, rewriter, 0); 173 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 174 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 175 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 176 rewriter); 177 } 178 179 /// Read base address from a fir.box. Returned address has type ty. 
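  /// In other words, a GEP/load of the descriptor field at kAddrPosInBox;
  /// e.g. for a boxed array of f32 the loaded address would have type
  /// `!llvm.ptr<f32>` (illustrative).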
180 mlir::Value 181 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 182 mlir::ConversionPatternRewriter &rewriter) const { 183 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 184 mlir::LLVM::ConstantOp cAddr = 185 genConstantOffset(loc, rewriter, kAddrPosInBox); 186 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 187 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 188 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 189 } 190 191 mlir::Value 192 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 193 mlir::ConversionPatternRewriter &rewriter) const { 194 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 195 mlir::LLVM::ConstantOp cElemLen = 196 genConstantOffset(loc, rewriter, kElemLenPosInBox); 197 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 198 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 199 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 200 } 201 202 // Get the element type given an LLVM type that is of the form 203 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 204 static mlir::Type getBoxEleTy(mlir::Type type, 205 llvm::ArrayRef<unsigned> indexes) { 206 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 207 type = t.getElementType(); 208 for (auto i : indexes) { 209 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 210 assert(!t.isOpaque() && i < t.getBody().size()); 211 type = t.getBody()[i]; 212 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 213 type = t.getElementType(); 214 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 215 type = t.getElementType(); 216 } else { 217 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 218 "request for invalid box element type"); 219 } 220 } 221 return type; 222 } 223 224 // Return LLVM type of the base address given the LLVM type 225 // of the related descriptor (lowered fir.box type). 226 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 227 return getBoxEleTy(type, {kAddrPosInBox}); 228 } 229 230 // Load the attribute from the \p box and perform a check against \p maskValue 231 // The final comparison is implemented as `(attribute & maskValue) != 0`. 232 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 233 mlir::ConversionPatternRewriter &rewriter, 234 unsigned maskValue) const { 235 mlir::Type attrTy = rewriter.getI32Type(); 236 mlir::Value attribute = 237 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 238 mlir::LLVM::ConstantOp attrMask = 239 genConstantOffset(loc, rewriter, maskValue); 240 auto maskRes = 241 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 242 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 243 return rewriter.create<mlir::LLVM::ICmpOp>( 244 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 245 } 246 247 template <typename... ARGS> 248 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 249 mlir::ConversionPatternRewriter &rewriter, 250 mlir::Value base, ARGS... 
args) const { 251 llvm::SmallVector<mlir::Value> cv = {args...}; 252 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 253 } 254 255 fir::LLVMTypeConverter &lowerTy() const { 256 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 257 } 258 259 const fir::FIRToLLVMPassOptions &options; 260 }; 261 262 /// FIR conversion pattern template 263 template <typename FromOp> 264 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 265 public: 266 using FIROpConversion<FromOp>::FIROpConversion; 267 using OpAdaptor = typename FromOp::Adaptor; 268 269 mlir::LogicalResult 270 matchAndRewrite(FromOp op, OpAdaptor adaptor, 271 mlir::ConversionPatternRewriter &rewriter) const final { 272 mlir::Type ty = this->convertType(op.getType()); 273 return doRewrite(op, ty, adaptor, rewriter); 274 } 275 276 virtual mlir::LogicalResult 277 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 278 mlir::ConversionPatternRewriter &rewriter) const = 0; 279 }; 280 } // namespace 281 282 namespace { 283 /// Lower `fir.address_of` operation to `llvm.address_of` operation. 284 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 285 using FIROpConversion::FIROpConversion; 286 287 mlir::LogicalResult 288 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 289 mlir::ConversionPatternRewriter &rewriter) const override { 290 auto ty = convertType(addr.getType()); 291 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 292 addr, ty, addr.getSymbol().getRootReference().getValue()); 293 return mlir::success(); 294 } 295 }; 296 } // namespace 297 298 /// Lookup the function to compute the memory size of this parametric derived 299 /// type. The size of the object may depend on the LEN type parameters of the 300 /// derived type. 301 static mlir::LLVM::LLVMFuncOp 302 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 303 mlir::ConversionPatternRewriter &rewriter) { 304 auto module = op->getParentOfType<mlir::ModuleOp>(); 305 std::string name = recTy.getName().str() + "P.mem.size"; 306 if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name)) 307 return memSizeFunc; 308 TODO(op.getLoc(), "did not find allocation function"); 309 } 310 311 // Compute the alloc scale size (constant factors encoded in the array type). 312 // We do this for arrays without a constant interior or arrays of character with 313 // dynamic length arrays, since those are the only ones that get decayed to a 314 // pointer to the element type. 
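// When this applies, the factor is the product of the compile-time-known
// extents; e.g. (illustrative) known extents of 2 and 4 give a constant scale
// of 8, and any remaining dynamic extents are multiplied in later from the
// shape operands by the callers below.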
315 template <typename OP> 316 static mlir::Value 317 genAllocationScaleSize(OP op, mlir::Type ity, 318 mlir::ConversionPatternRewriter &rewriter) { 319 mlir::Location loc = op.getLoc(); 320 mlir::Type dataTy = op.getInType(); 321 mlir::Type scalarType = fir::unwrapSequenceType(dataTy); 322 auto seqTy = dataTy.dyn_cast<fir::SequenceType>(); 323 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) || 324 (seqTy && fir::characterWithDynamicLen(scalarType))) { 325 fir::SequenceType::Extent constSize = 1; 326 for (auto extent : seqTy.getShape()) 327 if (extent != fir::SequenceType::getUnknownExtent()) 328 constSize *= extent; 329 if (constSize != 1) { 330 mlir::Value constVal{ 331 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 332 return constVal; 333 } 334 } 335 return nullptr; 336 } 337 338 namespace { 339 /// convert to LLVM IR dialect `alloca` 340 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 341 using FIROpConversion::FIROpConversion; 342 343 mlir::LogicalResult 344 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 345 mlir::ConversionPatternRewriter &rewriter) const override { 346 mlir::ValueRange operands = adaptor.getOperands(); 347 auto loc = alloc.getLoc(); 348 mlir::Type ity = lowerTy().indexType(); 349 unsigned i = 0; 350 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 351 mlir::Type ty = convertType(alloc.getType()); 352 mlir::Type resultTy = ty; 353 if (alloc.hasLenParams()) { 354 unsigned end = alloc.numLenParams(); 355 llvm::SmallVector<mlir::Value> lenParams; 356 for (; i < end; ++i) 357 lenParams.push_back(operands[i]); 358 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 359 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 360 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 361 chrTy.getContext(), chrTy.getFKind()); 362 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 363 assert(end == 1); 364 size = integerCast(loc, rewriter, ity, lenParams[0]); 365 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 366 mlir::LLVM::LLVMFuncOp memSizeFn = 367 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 368 if (!memSizeFn) 369 emitError(loc, "did not find allocation function"); 370 mlir::NamedAttribute attr = rewriter.getNamedAttr( 371 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 372 auto call = rewriter.create<mlir::LLVM::CallOp>( 373 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 374 size = call.getResult(0); 375 ty = ::getVoidPtrType(alloc.getContext()); 376 } else { 377 return emitError(loc, "unexpected type ") 378 << scalarType << " with type parameters"; 379 } 380 } 381 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter)) 382 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 383 if (alloc.hasShapeOperands()) { 384 unsigned end = operands.size(); 385 for (; i < end; ++i) 386 size = rewriter.create<mlir::LLVM::MulOp>( 387 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 388 } 389 if (ty == resultTy) { 390 // Do not emit the bitcast if ty and resultTy are the same. 
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
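/// For example (illustrative), `fir.box_dims %box, %dim` becomes three
/// GEP/load pairs reading dims[%dim] of the descriptor, yielding the
/// (lower bound, extent, stride) triple for that dimension.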
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
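/// As with fir.box_isalloc above, this reduces to a mask test on the
/// descriptor attribute field, roughly
/// `(attribute & CFI_attribute_pointer) != 0`.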
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank
/// from the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
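/// A literal held as a StringAttr or DenseElementsAttr is emitted directly as
/// an llvm.mlir.constant; otherwise (e.g., illustrative, a KIND=2 literal held
/// as an ArrayAttr) it is built character by character with llvm.insertvalue.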
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // Convert each character to a precise bitsize.
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
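/// That is, .EQ. becomes `fcmp oeq` on both components combined with `and`,
/// and .NE. becomes `fcmp une` on both components combined with `or`.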
674 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 675 using FIROpConversion::FIROpConversion; 676 677 mlir::LogicalResult 678 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 679 mlir::ConversionPatternRewriter &rewriter) const override { 680 mlir::ValueRange operands = adaptor.getOperands(); 681 mlir::MLIRContext *ctxt = cmp.getContext(); 682 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 683 mlir::Type resTy = convertType(cmp.getType()); 684 mlir::Location loc = cmp.getLoc(); 685 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 686 llvm::SmallVector<mlir::Value, 2> rp = { 687 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 688 pos0), 689 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 690 pos0)}; 691 auto rcp = 692 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 693 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 694 llvm::SmallVector<mlir::Value, 2> ip = { 695 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 696 pos1), 697 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 698 pos1)}; 699 auto icp = 700 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 701 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp}; 702 switch (cmp.getPredicate()) { 703 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 704 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 705 break; 706 case mlir::arith::CmpFPredicate::UNE: // .NE. 707 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 708 break; 709 default: 710 rewriter.replaceOp(cmp, rcp.getResult()); 711 break; 712 } 713 return mlir::success(); 714 } 715 }; 716 717 /// Lower complex constants 718 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 719 using FIROpConversion::FIROpConversion; 720 721 mlir::LogicalResult 722 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 723 mlir::ConversionPatternRewriter &rewriter) const override { 724 mlir::Location loc = conc.getLoc(); 725 mlir::MLIRContext *ctx = conc.getContext(); 726 mlir::Type ty = convertType(conc.getType()); 727 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 728 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 729 auto realPart = 730 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 731 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 732 auto imPart = 733 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 734 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 735 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 736 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 737 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 738 loc, ty, undef, realPart, realIndex); 739 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 740 imPart, imIndex); 741 return mlir::success(); 742 } 743 744 inline llvm::APFloat getValue(mlir::Attribute attr) const { 745 return attr.cast<fir::RealAttr>().getValue(); 746 } 747 }; 748 749 /// convert value of from-type to value of to-type 750 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 751 using FIROpConversion::FIROpConversion; 752 753 static bool isFloatingPointTy(mlir::Type ty) { 754 return ty.isa<mlir::FloatType>(); 755 } 756 757 mlir::LogicalResult 758 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 759 
mlir::ConversionPatternRewriter &rewriter) const override { 760 auto fromFirTy = convert.getValue().getType(); 761 auto toFirTy = convert.getRes().getType(); 762 auto fromTy = convertType(fromFirTy); 763 auto toTy = convertType(toFirTy); 764 mlir::Value op0 = adaptor.getOperands()[0]; 765 if (fromTy == toTy) { 766 rewriter.replaceOp(convert, op0); 767 return mlir::success(); 768 } 769 auto loc = convert.getLoc(); 770 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 771 unsigned toBits, mlir::Type toTy) -> mlir::Value { 772 if (fromBits == toBits) { 773 // TODO: Converting between two floating-point representations with the 774 // same bitwidth is not allowed for now. 775 mlir::emitError(loc, 776 "cannot implicitly convert between two floating-point " 777 "representations of the same bitwidth"); 778 return {}; 779 } 780 if (fromBits > toBits) 781 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 782 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 783 }; 784 // Complex to complex conversion. 785 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 786 // Special case: handle the conversion of a complex such that both the 787 // real and imaginary parts are converted together. 788 auto zero = mlir::ArrayAttr::get(convert.getContext(), 789 rewriter.getI32IntegerAttr(0)); 790 auto one = mlir::ArrayAttr::get(convert.getContext(), 791 rewriter.getI32IntegerAttr(1)); 792 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 793 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 794 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 795 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 796 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 797 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 798 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 799 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 800 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 801 auto i1 = 802 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 803 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 804 ic, one); 805 return mlir::success(); 806 } 807 808 // Follow UNIX F77 convention for logicals: 809 // 1. underlying integer is not zero => logical is .TRUE. 810 // 2. logical is .TRUE. => set underlying integer to 1. 811 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 812 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 813 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 814 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 815 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 816 return mlir::success(); 817 } 818 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 819 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 820 return mlir::success(); 821 } 822 823 // Floating point to floating point conversion. 
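    // E.g. (illustrative) f32 -> f64 lowers to llvm.fpext and f64 -> f32 to
    // llvm.fptrunc; same-width FP conversions are rejected in convertFpToFp.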
824 if (isFloatingPointTy(fromTy)) { 825 if (isFloatingPointTy(toTy)) { 826 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 827 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 828 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 829 rewriter.replaceOp(convert, v); 830 return mlir::success(); 831 } 832 if (toTy.isa<mlir::IntegerType>()) { 833 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 834 return mlir::success(); 835 } 836 } else if (fromTy.isa<mlir::IntegerType>()) { 837 // Integer to integer conversion. 838 if (toTy.isa<mlir::IntegerType>()) { 839 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 840 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 841 assert(fromBits != toBits); 842 if (fromBits > toBits) { 843 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 844 return mlir::success(); 845 } 846 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 847 return mlir::success(); 848 } 849 // Integer to floating point conversion. 850 if (isFloatingPointTy(toTy)) { 851 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 852 return mlir::success(); 853 } 854 // Integer to pointer conversion. 855 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 856 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 857 return mlir::success(); 858 } 859 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 860 // Pointer to integer conversion. 861 if (toTy.isa<mlir::IntegerType>()) { 862 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 863 return mlir::success(); 864 } 865 // Pointer to pointer conversion. 866 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 867 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 868 return mlir::success(); 869 } 870 } 871 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 872 } 873 }; 874 875 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 876 /// table. 877 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 878 using FIROpConversion::FIROpConversion; 879 880 mlir::LogicalResult 881 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 882 mlir::ConversionPatternRewriter &rewriter) const override { 883 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 884 return mlir::failure(); 885 } 886 }; 887 888 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 889 /// derived type. 890 struct DispatchTableOpConversion 891 : public FIROpConversion<fir::DispatchTableOp> { 892 using FIROpConversion::FIROpConversion; 893 894 mlir::LogicalResult 895 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 896 mlir::ConversionPatternRewriter &rewriter) const override { 897 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 898 return mlir::failure(); 899 } 900 }; 901 902 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 903 /// method-name to a function. 904 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 905 using FIROpConversion::FIROpConversion; 906 907 mlir::LogicalResult 908 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 909 mlir::ConversionPatternRewriter &rewriter) const override { 910 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 911 return mlir::failure(); 912 } 913 }; 914 915 /// Lower `fir.global_len` operation. 
916 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 917 using FIROpConversion::FIROpConversion; 918 919 mlir::LogicalResult 920 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 921 mlir::ConversionPatternRewriter &rewriter) const override { 922 TODO(globalLen.getLoc(), "fir.global_len codegen"); 923 return mlir::failure(); 924 } 925 }; 926 927 /// Lower fir.len_param_index 928 struct LenParamIndexOpConversion 929 : public FIROpConversion<fir::LenParamIndexOp> { 930 using FIROpConversion::FIROpConversion; 931 932 // FIXME: this should be specialized by the runtime target 933 mlir::LogicalResult 934 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 935 mlir::ConversionPatternRewriter &rewriter) const override { 936 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 937 } 938 }; 939 940 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 941 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 942 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 943 /// element is the length of the character buffer (`#n`). 944 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 945 using FIROpConversion::FIROpConversion; 946 947 mlir::LogicalResult 948 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 949 mlir::ConversionPatternRewriter &rewriter) const override { 950 mlir::ValueRange operands = adaptor.getOperands(); 951 auto *ctx = emboxChar.getContext(); 952 953 mlir::Value charBuffer = operands[0]; 954 mlir::Value charBufferLen = operands[1]; 955 956 mlir::Location loc = emboxChar.getLoc(); 957 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 958 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 959 960 mlir::Type lenTy = 961 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 962 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 963 964 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 965 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 966 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 967 loc, llvmStructTy, llvmStruct, charBuffer, c0); 968 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 969 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 970 971 return mlir::success(); 972 } 973 }; 974 } // namespace 975 976 /// Return the LLVMFuncOp corresponding to the standard malloc call. 977 static mlir::LLVM::LLVMFuncOp 978 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 979 auto module = op->getParentOfType<mlir::ModuleOp>(); 980 if (mlir::LLVM::LLVMFuncOp mallocFunc = 981 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 982 return mallocFunc; 983 mlir::OpBuilder moduleBuilder( 984 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 985 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 986 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 987 rewriter.getUnknownLoc(), "malloc", 988 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 989 indexType, 990 /*isVarArg=*/false)); 991 } 992 993 /// Helper function for generating the LLVM IR that computes the size 994 /// in bytes for a derived type. 
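/// Uses the classic null-pointer GEP idiom, roughly:
///   %gep = llvm.getelementptr %null[%one] : (!llvm.ptr<T>, i64) -> !llvm.ptr<T>
///   %size = llvm.ptrtoint %gep : !llvm.ptr<T> to i64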
995 static mlir::Value 996 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 997 mlir::ConversionPatternRewriter &rewriter) { 998 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 999 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1000 llvm::SmallVector<mlir::Value> args = {one}; 1001 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 1002 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 1003 } 1004 1005 namespace { 1006 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1007 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1008 using FIROpConversion::FIROpConversion; 1009 1010 mlir::LogicalResult 1011 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1012 mlir::ConversionPatternRewriter &rewriter) const override { 1013 mlir::Type heapTy = heap.getType(); 1014 mlir::Type ty = convertType(heapTy); 1015 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1016 mlir::Location loc = heap.getLoc(); 1017 auto ity = lowerTy().indexType(); 1018 mlir::Type dataTy = fir::unwrapRefType(heapTy); 1019 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1020 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1021 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1022 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1023 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1024 for (mlir::Value opnd : adaptor.getOperands()) 1025 size = rewriter.create<mlir::LLVM::MulOp>( 1026 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1027 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1028 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1029 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1030 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1031 malloc.getResult(0)); 1032 return mlir::success(); 1033 } 1034 1035 // Compute the (allocation) size of the allocmem type in bytes. 1036 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1037 mlir::ConversionPatternRewriter &rewriter, 1038 mlir::Type llTy) const { 1039 // Use the primitive size, if available. 1040 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1041 if (auto size = 1042 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1043 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1044 1045 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1046 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1047 } 1048 }; 1049 } // namespace 1050 1051 /// Return the LLVMFuncOp corresponding to the standard free call. 
1052 static mlir::LLVM::LLVMFuncOp 1053 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1054 auto module = op->getParentOfType<mlir::ModuleOp>(); 1055 if (mlir::LLVM::LLVMFuncOp freeFunc = 1056 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1057 return freeFunc; 1058 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1059 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1060 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1061 rewriter.getUnknownLoc(), "free", 1062 mlir::LLVM::LLVMFunctionType::get(voidType, 1063 getVoidPtrType(op.getContext()), 1064 /*isVarArg=*/false)); 1065 } 1066 1067 namespace { 1068 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1069 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1070 using FIROpConversion::FIROpConversion; 1071 1072 mlir::LogicalResult 1073 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1074 mlir::ConversionPatternRewriter &rewriter) const override { 1075 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1076 mlir::Location loc = freemem.getLoc(); 1077 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1078 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1079 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1080 rewriter.create<mlir::LLVM::CallOp>( 1081 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1082 rewriter.eraseOp(freemem); 1083 return mlir::success(); 1084 } 1085 }; 1086 } // namespace 1087 1088 /// Common base class for embox to descriptor conversion. 1089 template <typename OP> 1090 struct EmboxCommonConversion : public FIROpConversion<OP> { 1091 using FIROpConversion<OP>::FIROpConversion; 1092 1093 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1094 // The order to find the LLVMFuncOp is as follows: 1095 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1096 // 2. The first ancestor that is a LLVMFuncOp. 1097 mlir::LLVM::LLVMFuncOp 1098 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1099 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1100 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1101 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1102 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1103 } 1104 1105 // Generate an alloca of size 1 and type \p toTy. 
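  // The alloca is inserted at the start of the entry block of the enclosing
  // LLVMFuncOp (see getFuncForAllocaInsert) so it is not re-executed inside
  // loops; the previous insertion point is restored afterwards.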
1106 mlir::LLVM::AllocaOp 1107 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1108 mlir::ConversionPatternRewriter &rewriter) const { 1109 auto thisPt = rewriter.saveInsertionPoint(); 1110 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1111 rewriter.setInsertionPointToStart(&func.front()); 1112 auto size = this->genI32Constant(loc, rewriter, 1); 1113 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1114 rewriter.restoreInsertionPoint(thisPt); 1115 return al; 1116 } 1117 1118 static int getCFIAttr(fir::BoxType boxTy) { 1119 auto eleTy = boxTy.getEleTy(); 1120 if (eleTy.isa<fir::PointerType>()) 1121 return CFI_attribute_pointer; 1122 if (eleTy.isa<fir::HeapType>()) 1123 return CFI_attribute_allocatable; 1124 return CFI_attribute_other; 1125 } 1126 1127 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1128 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1129 .template dyn_cast<fir::RecordType>(); 1130 } 1131 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1132 auto recTy = unwrapIfDerived(boxTy); 1133 return recTy && recTy.getNumLenParams() > 0; 1134 } 1135 static bool isDerivedType(fir::BoxType boxTy) { 1136 return static_cast<bool>(unwrapIfDerived(boxTy)); 1137 } 1138 1139 // Get the element size and CFI type code of the boxed value. 1140 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1141 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1142 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1143 auto doInteger = 1144 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1145 int typeCode = fir::integerBitsToTypeCode(width); 1146 return {this->genConstantOffset(loc, rewriter, width / 8), 1147 this->genConstantOffset(loc, rewriter, typeCode)}; 1148 }; 1149 auto doLogical = 1150 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1151 int typeCode = fir::logicalBitsToTypeCode(width); 1152 return {this->genConstantOffset(loc, rewriter, width / 8), 1153 this->genConstantOffset(loc, rewriter, typeCode)}; 1154 }; 1155 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1156 int typeCode = fir::realBitsToTypeCode(width); 1157 return {this->genConstantOffset(loc, rewriter, width / 8), 1158 this->genConstantOffset(loc, rewriter, typeCode)}; 1159 }; 1160 auto doComplex = 1161 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1162 auto typeCode = fir::complexBitsToTypeCode(width); 1163 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1164 this->genConstantOffset(loc, rewriter, typeCode)}; 1165 }; 1166 auto doCharacter = 1167 [&](unsigned width, 1168 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1169 auto typeCode = fir::characterBitsToTypeCode(width); 1170 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1171 if (width == 8) 1172 return {len, typeCodeVal}; 1173 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1174 auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8); 1175 auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len); 1176 auto size = 1177 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64); 1178 return {size, typeCodeVal}; 1179 }; 1180 auto getKindMap = [&]() -> fir::KindMapping & { 1181 return this->lowerTy().getKindMap(); 1182 }; 1183 // Pointer-like types. 1184 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1185 boxEleTy = eleTy; 1186 // Integer types. 
1187 if (fir::isa_integer(boxEleTy)) { 1188 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1189 return doInteger(ty.getWidth()); 1190 auto ty = boxEleTy.cast<fir::IntegerType>(); 1191 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1192 } 1193 // Floating point types. 1194 if (fir::isa_real(boxEleTy)) { 1195 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1196 return doFloat(ty.getWidth()); 1197 auto ty = boxEleTy.cast<fir::RealType>(); 1198 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1199 } 1200 // Complex types. 1201 if (fir::isa_complex(boxEleTy)) { 1202 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1203 return doComplex( 1204 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1205 auto ty = boxEleTy.cast<fir::ComplexType>(); 1206 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1207 } 1208 // Character types. 1209 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1210 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1211 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1212 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1213 return doCharacter(charWidth, len); 1214 } 1215 assert(!lenParams.empty()); 1216 return doCharacter(charWidth, lenParams.back()); 1217 } 1218 // Logical type. 1219 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1220 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1221 // Array types. 1222 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1223 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1224 // Derived-type types. 1225 if (boxEleTy.isa<fir::RecordType>()) { 1226 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1227 this->lowerTy().convertType(boxEleTy)); 1228 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1229 auto one = 1230 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1231 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1232 mlir::ValueRange{one}); 1233 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1234 loc, this->lowerTy().indexType(), gep); 1235 return {eleSize, 1236 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1237 } 1238 // Reference type. 
1239 if (fir::isa_ref_type(boxEleTy)) { 1240 // FIXME: use the target pointer size rather than sizeof(void*) 1241 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1242 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1243 } 1244 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1245 } 1246 1247 /// Basic pattern to write a field in the descriptor 1248 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1249 mlir::Location loc, mlir::Value dest, 1250 llvm::ArrayRef<unsigned> fldIndexes, 1251 mlir::Value value, bool bitcast = false) const { 1252 auto boxTy = dest.getType(); 1253 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1254 if (bitcast) 1255 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1256 else 1257 value = this->integerCast(loc, rewriter, fldTy, value); 1258 llvm::SmallVector<mlir::Attribute, 2> attrs; 1259 for (auto i : fldIndexes) 1260 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1261 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1262 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1263 indexesAttr); 1264 } 1265 1266 inline mlir::Value 1267 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1268 mlir::Location loc, mlir::Value dest, 1269 mlir::Value base) const { 1270 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1271 /*bitCast=*/true); 1272 } 1273 1274 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1275 mlir::Location loc, mlir::Value dest, 1276 unsigned dim, mlir::Value lb) const { 1277 return insertField(rewriter, loc, dest, 1278 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1279 } 1280 1281 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1282 mlir::Location loc, mlir::Value dest, 1283 unsigned dim, mlir::Value extent) const { 1284 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1285 extent); 1286 } 1287 1288 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1289 mlir::Location loc, mlir::Value dest, 1290 unsigned dim, mlir::Value stride) const { 1291 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1292 stride); 1293 } 1294 1295 /// Get the address of the type descriptor global variable that was created by 1296 /// lowering for derived type \p recType. 1297 template <typename BOX> 1298 mlir::Value 1299 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1300 mlir::Location loc, fir::RecordType recType) const { 1301 std::string name = 1302 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1303 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1304 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1305 auto ty = mlir::LLVM::LLVMPointerType::get( 1306 this->lowerTy().convertType(global.getType())); 1307 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1308 global.getSymName()); 1309 } 1310 if (auto global = 1311 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1312 // The global may have already been translated to LLVM. 1313 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1314 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1315 global.getSymName()); 1316 } 1317 // Type info derived types do not have type descriptors since they are the 1318 // types defining type descriptors. 
1319 if (!this->options.ignoreMissingTypeDescriptors && 1320 !fir::NameUniquer::belongsToModule( 1321 name, Fortran::semantics::typeInfoBuiltinModule)) 1322 fir::emitFatalError( 1323 loc, "runtime derived type info descriptor was not generated"); 1324 return rewriter.create<mlir::LLVM::NullOp>( 1325 loc, ::getVoidPtrType(box.getContext())); 1326 } 1327 1328 template <typename BOX> 1329 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1330 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1331 unsigned rank, mlir::ValueRange lenParams) const { 1332 auto loc = box.getLoc(); 1333 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1334 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1335 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1336 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1337 mlir::Value descriptor = 1338 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1339 1340 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1341 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1342 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1343 typeparams.push_back(box.substr()[1]); 1344 } 1345 1346 // Write each of the fields with the appropriate values 1347 auto [eleSize, cfiTy] = 1348 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1349 descriptor = 1350 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1351 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1352 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1353 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1354 this->genI32Constant(loc, rewriter, rank)); 1355 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1356 descriptor = 1357 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1358 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1359 const bool hasAddendum = isDerivedType(boxTy); 1360 descriptor = 1361 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1362 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1363 1364 if (hasAddendum) { 1365 auto isArray = 1366 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1367 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1368 auto typeDesc = 1369 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1370 descriptor = 1371 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1372 /*bitCast=*/true); 1373 } 1374 1375 return {boxTy, descriptor, eleSize}; 1376 } 1377 1378 /// Compute the base address of a substring given the base address of a scalar 1379 /// string and the zero based string lower bound. 
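/// When the base is lowered to a pointer to an LLVM array, a leading zero
/// index is needed, roughly `gep %base[0, %lowerBound]`; for a plain pointer
/// it is just `gep %base[%lowerBound]`.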
1380 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1381 mlir::Location loc, mlir::Value base, 1382 mlir::Value lowerBound) const { 1383 llvm::SmallVector<mlir::Value> gepOperands; 1384 auto baseType = 1385 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1386 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1387 auto idxTy = this->lowerTy().indexType(); 1388 gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0)); 1389 gepOperands.push_back(lowerBound); 1390 } else { 1391 gepOperands.push_back(lowerBound); 1392 } 1393 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1394 } 1395 1396 /// If the embox is not in a globalOp body, allocate storage for the box; 1397 /// store the value inside and return the generated alloca. Return the input 1398 /// value otherwise. 1399 mlir::Value 1400 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1401 mlir::Location loc, mlir::Value boxValue) const { 1402 auto *thisBlock = rewriter.getInsertionBlock(); 1403 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1404 return boxValue; 1405 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1406 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1407 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1408 return alloca; 1409 } 1410 }; 1411 1412 /// Compute the extent of a triplet slice (lb:ub:step). 1413 static mlir::Value 1414 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1415 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1416 mlir::Value step, mlir::Value zero, mlir::Type type) { 1417 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1418 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1419 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1420 // If the resulting extent is negative (`ub-lb` and `step` have different 1421 // signs), zero must be returned instead. 1422 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1423 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1424 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1425 } 1426 1427 /// Create a generic box on a memory reference. This conversion lowers the 1428 /// abstract box to the appropriate, initialized descriptor. 1429 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1430 using EmboxCommonConversion::EmboxCommonConversion; 1431 1432 mlir::LogicalResult 1433 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1434 mlir::ConversionPatternRewriter &rewriter) const override { 1435 mlir::ValueRange operands = adaptor.getOperands(); 1436 assert(!embox.getShape() && "There should be no dims on this embox op"); 1437 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1438 embox, rewriter, /*rank=*/0, /*lenParams=*/operands.drop_front(1)); 1439 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, operands[0]); 1440 if (isDerivedTypeWithLenParams(boxTy)) { 1441 TODO(embox.getLoc(), 1442 "fir.embox codegen of derived with length parameters"); 1443 return mlir::failure(); 1444 } 1445 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1446 rewriter.replaceOp(embox, result); 1447 return mlir::success(); 1448 } 1449 }; 1450 1451 /// Create a generic box on a memory reference.
1452 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1453 using EmboxCommonConversion::EmboxCommonConversion; 1454 1455 mlir::LogicalResult 1456 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1457 mlir::ConversionPatternRewriter &rewriter) const override { 1458 mlir::ValueRange operands = adaptor.getOperands(); 1459 auto [boxTy, dest, eleSize] = 1460 consDescriptorPrefix(xbox, rewriter, xbox.getOutRank(), 1461 operands.drop_front(xbox.lenParamOffset())); 1462 // Generate the triples in the dims field of the descriptor 1463 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1464 mlir::Value base = operands[0]; 1465 assert(!xbox.shape().empty() && "must have a shape"); 1466 unsigned shapeOffset = xbox.shapeOffset(); 1467 bool hasShift = !xbox.shift().empty(); 1468 unsigned shiftOffset = xbox.shiftOffset(); 1469 bool hasSlice = !xbox.slice().empty(); 1470 unsigned sliceOffset = xbox.sliceOffset(); 1471 mlir::Location loc = xbox.getLoc(); 1472 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1473 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1474 mlir::Value prevPtrOff = one; 1475 mlir::Type eleTy = boxTy.getEleTy(); 1476 const unsigned rank = xbox.getRank(); 1477 llvm::SmallVector<mlir::Value> gepArgs; 1478 unsigned constRows = 0; 1479 mlir::Value ptrOffset = zero; 1480 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1481 assert(memEleTy.isa<fir::SequenceType>()); 1482 auto seqTy = memEleTy.cast<fir::SequenceType>(); 1483 mlir::Type seqEleTy = seqTy.getEleTy(); 1484 // Adjust the element scaling factor if the element is a dependent type. 1485 if (fir::hasDynamicSize(seqEleTy)) { 1486 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) { 1487 assert(xbox.lenParams().size() == 1); 1488 mlir::LLVM::ConstantOp charSize = genConstantIndex( 1489 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); 1490 mlir::Value castedLen = 1491 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); 1492 auto byteOffset = 1493 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen); 1494 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); 1495 } else if (seqEleTy.isa<fir::RecordType>()) { 1496 // prevPtrOff = ; 1497 TODO(loc, "generate call to calculate size of PDT"); 1498 } else { 1499 fir::emitFatalError(loc, "unexpected dynamic type"); 1500 } 1501 } else { 1502 constRows = seqTy.getConstantRows(); 1503 } 1504 1505 const auto hasSubcomp = !xbox.subcomponent().empty(); 1506 const bool hasSubstr = !xbox.substr().empty(); 1507 /// Compute initial element stride that will be used to compute the step in 1508 /// each dimension. 1509 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize); 1510 if (hasSubcomp) { 1511 // We have a subcomponent. The step value needs to be the number of 1512 // bytes per element (which is a derived type). 1513 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1514 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1515 } else if (hasSubstr) { 1516 // We have a substring. The step value needs to be the number of bytes 1517 // per CHARACTER element.
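// Illustration (hypothetical values, not taken from the surrounding lowering): for a
// constant-length character(kind=1,len=7) base element, the stride computed just below
// is the constant 7 * 8 / 8 = 7 bytes; when the length is dynamic, the byte size already
// accumulated in prevPtrOff is reused instead.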
1518 auto charTy = seqEleTy.cast<fir::CharacterType>(); 1519 if (fir::hasDynamicSize(charTy)) { 1520 prevDimByteStride = prevPtrOff; 1521 } else { 1522 prevDimByteStride = genConstantIndex( 1523 loc, i64Ty, rewriter, 1524 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8); 1525 } 1526 } 1527 1528 // Process the array subspace arguments (shape, shift, etc.), if any, 1529 // translating everything to values in the descriptor wherever the entity 1530 // has a dynamic array dimension. 1531 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1532 mlir::Value extent = operands[shapeOffset]; 1533 mlir::Value outerExtent = extent; 1534 bool skipNext = false; 1535 if (hasSlice) { 1536 mlir::Value off = operands[sliceOffset]; 1537 mlir::Value adj = one; 1538 if (hasShift) 1539 adj = operands[shiftOffset]; 1540 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1541 if (constRows > 0) { 1542 gepArgs.push_back(ao); 1543 } else { 1544 auto dimOff = 1545 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1546 ptrOffset = 1547 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1548 } 1549 if (mlir::isa_and_nonnull<fir::UndefOp>( 1550 xbox.slice()[3 * di + 1].getDefiningOp())) { 1551 // This dimension contains a scalar expression in the array slice op. 1552 // The dimension is loop invariant, will be dropped, and will not 1553 // appear in the descriptor. 1554 skipNext = true; 1555 } 1556 } 1557 if (!skipNext) { 1558 // store extent 1559 if (hasSlice) 1560 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1561 operands[sliceOffset + 1], 1562 operands[sliceOffset + 2], zero, i64Ty); 1563 // Lower bound is normalized to 0 for BIND(C) interoperability. 1564 mlir::Value lb = zero; 1565 const bool isaPointerOrAllocatable = 1566 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1567 // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and 1568 // denormalized descriptors. 1569 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) 1570 lb = one; 1571 // If there is a shifted origin, and no fir.slice, and this is not 1572 // a normalized descriptor, then use the value from the shift op as 1573 // the lower bound.
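// Example (hypothetical): when a POINTER or ALLOCATABLE entity is emboxed with a
// fir.shift of 2 and no slice, the lower bound stored in the descriptor is 2, unless
// the extent is zero, in which case the select generated below stores 1 instead.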
1574 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1575 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1576 lb = operands[shiftOffset]; 1577 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1578 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1579 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1580 lb); 1581 } 1582 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1583 1584 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1585 1586 // store step (scaled by shaped extent) 1587 mlir::Value step = prevDimByteStride; 1588 if (hasSlice) 1589 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1590 operands[sliceOffset + 2]); 1591 dest = insertStride(rewriter, loc, dest, descIdx, step); 1592 ++descIdx; 1593 } 1594 1595 // compute the stride and offset for the next natural dimension 1596 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1597 loc, i64Ty, prevDimByteStride, outerExtent); 1598 if (constRows == 0) 1599 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1600 outerExtent); 1601 else 1602 --constRows; 1603 1604 // increment iterators 1605 ++shapeOffset; 1606 if (hasShift) 1607 ++shiftOffset; 1608 if (hasSlice) 1609 sliceOffset += 3; 1610 } 1611 if (hasSlice || hasSubcomp || hasSubstr) { 1612 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1613 args.append(gepArgs.rbegin(), gepArgs.rend()); 1614 if (hasSubcomp) { 1615 // For each field in the path add the offset to base via the args list. 1616 // In the most general case, some offsets must be computed since 1617 // they are not known until runtime. 1618 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1619 fir::unwrapPassByRefType(xbox.memref().getType())))) 1620 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1621 args.append(operands.begin() + xbox.subcomponentOffset(), 1622 operands.begin() + xbox.subcomponentOffset() + 1623 xbox.subcomponent().size()); 1624 } 1625 base = 1626 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1627 if (hasSubstr) 1628 base = shiftSubstringBase(rewriter, loc, base, 1629 operands[xbox.substrOffset()]); 1630 } 1631 dest = insertBaseAddress(rewriter, loc, dest, base); 1632 if (isDerivedTypeWithLenParams(boxTy)) 1633 TODO(loc, "fir.embox codegen of derived with length parameters"); 1634 1635 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1636 rewriter.replaceOp(xbox, result); 1637 return mlir::success(); 1638 } 1639 1640 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1641 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1642 /// zero origin lower bound for interoperability with BIND(C). 1643 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1644 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1645 } 1646 }; 1647 1648 /// Create a new box given a box reference. 1649 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1650 using EmboxCommonConversion::EmboxCommonConversion; 1651 1652 mlir::LogicalResult 1653 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1654 mlir::ConversionPatternRewriter &rewriter) const override { 1655 mlir::Location loc = rebox.getLoc(); 1656 mlir::Type idxTy = lowerTy().indexType(); 1657 mlir::Value loweredBox = adaptor.getOperands()[0]; 1658 mlir::ValueRange operands = adaptor.getOperands(); 1659 1660 // Create new descriptor and fill its non-shape related data.
1661 llvm::SmallVector<mlir::Value, 2> lenParams; 1662 mlir::Type inputEleTy = getInputEleTy(rebox); 1663 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1664 mlir::Value len = 1665 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1666 if (charTy.getFKind() != 1) { 1667 mlir::Value width = 1668 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1669 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1670 } 1671 lenParams.emplace_back(len); 1672 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1673 if (recTy.getNumLenParams() != 0) 1674 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1675 } 1676 auto [boxTy, dest, eleSize] = 1677 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1678 1679 // Read input extents, strides, and base address 1680 llvm::SmallVector<mlir::Value> inputExtents; 1681 llvm::SmallVector<mlir::Value> inputStrides; 1682 const unsigned inputRank = rebox.getRank(); 1683 for (unsigned i = 0; i < inputRank; ++i) { 1684 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1685 llvm::SmallVector<mlir::Value, 3> dimInfo = 1686 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1687 inputExtents.emplace_back(dimInfo[1]); 1688 inputStrides.emplace_back(dimInfo[2]); 1689 } 1690 1691 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1692 mlir::Value baseAddr = 1693 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1694 1695 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1696 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1697 operands, rewriter); 1698 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1699 operands, rewriter); 1700 } 1701 1702 private: 1703 /// Write resulting shape and base address in descriptor, and replace rebox 1704 /// op. 1705 mlir::LogicalResult 1706 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1707 mlir::ValueRange lbounds, mlir::ValueRange extents, 1708 mlir::ValueRange strides, 1709 mlir::ConversionPatternRewriter &rewriter) const { 1710 mlir::Location loc = rebox.getLoc(); 1711 mlir::Value zero = 1712 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1713 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1714 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1715 mlir::Value extent = std::get<0>(iter.value()); 1716 unsigned dim = iter.index(); 1717 mlir::Value lb = one; 1718 if (!lbounds.empty()) { 1719 lb = lbounds[dim]; 1720 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1721 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1722 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1723 }; 1724 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1725 dest = insertExtent(rewriter, loc, dest, dim, extent); 1726 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1727 } 1728 dest = insertBaseAddress(rewriter, loc, dest, base); 1729 mlir::Value result = 1730 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1731 rewriter.replaceOp(rebox, result); 1732 return mlir::success(); 1733 } 1734 1735 // Apply slice given the base address, extents and strides of the input box. 
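// For illustration (schematic, hypothetical source): reboxing x(2:9:2)%f shifts the
// base address to the %f subcomponent, keeps strides in bytes, and computes the
// sliced extent as (9 - 2 + 2) / 2 = 4 for that dimension.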
1736 mlir::LogicalResult 1737 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1738 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1739 mlir::ValueRange operands, 1740 mlir::ConversionPatternRewriter &rewriter) const { 1741 mlir::Location loc = rebox.getLoc(); 1742 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1743 mlir::Type idxTy = lowerTy().indexType(); 1744 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1745 // Apply subcomponent and substring shift on base address. 1746 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1747 // Cast to inputEleTy* so that a GEP can be used. 1748 mlir::Type inputEleTy = getInputEleTy(rebox); 1749 auto llvmElePtrTy = 1750 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1751 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1752 1753 if (!rebox.subcomponent().empty()) { 1754 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1755 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1756 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1757 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1758 } 1759 if (!rebox.substr().empty()) 1760 base = shiftSubstringBase(rewriter, loc, base, 1761 operands[rebox.substrOffset()]); 1762 } 1763 1764 if (rebox.slice().empty()) 1765 // The array section is of the form array[%component][substring], keep 1766 // the input array extents and strides. 1767 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1768 inputExtents, inputStrides, rewriter); 1769 1770 // Strides from the fir.box are in bytes. 1771 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1772 1773 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1774 // and strides. 1775 llvm::SmallVector<mlir::Value> slicedExtents; 1776 llvm::SmallVector<mlir::Value> slicedStrides; 1777 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1778 const bool sliceHasOrigins = !rebox.shift().empty(); 1779 unsigned sliceOps = rebox.sliceOffset(); 1780 unsigned shiftOps = rebox.shiftOffset(); 1781 auto strideOps = inputStrides.begin(); 1782 const unsigned inputRank = inputStrides.size(); 1783 for (unsigned i = 0; i < inputRank; 1784 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1785 mlir::Value sliceLb = 1786 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1787 mlir::Value inputStride = *strideOps; // already idxTy 1788 // Apply origin shift: base += (lb-shift)*input_stride 1789 mlir::Value sliceOrigin = 1790 sliceHasOrigins 1791 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1792 : one; 1793 mlir::Value diff = 1794 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1795 mlir::Value offset = 1796 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1797 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1798 // Apply upper bound and step if this is a triplet. Otherwise, the 1799 // dimension is dropped and no extents/strides are computed. 
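// Worked example (hypothetical values): for a triplet lb=2, ub=10, step=3 over an
// input byte stride of 4, the code below produces extent = (10 - 2 + 3) / 3 = 3 and a
// sliced stride of 3 * 4 = 12 bytes.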
1800 mlir::Value upper = operands[sliceOps + 1]; 1801 const bool isTripletSlice = 1802 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1803 if (isTripletSlice) { 1804 mlir::Value step = 1805 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1806 // extent = (ub - lb + step) / step 1807 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1808 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1809 sliceUb, step, zero, idxTy); 1810 slicedExtents.emplace_back(extent); 1811 // stride = step*input_stride 1812 mlir::Value stride = 1813 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1814 slicedStrides.emplace_back(stride); 1815 } 1816 } 1817 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1818 slicedExtents, slicedStrides, rewriter); 1819 } 1820 1821 /// Apply a new shape to the data described by a box given the base address, 1822 /// extents and strides of the box. 1823 mlir::LogicalResult 1824 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1825 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1826 mlir::ValueRange operands, 1827 mlir::ConversionPatternRewriter &rewriter) const { 1828 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1829 operands.begin() + rebox.shiftOffset() + 1830 rebox.shift().size()}; 1831 if (rebox.shape().empty()) { 1832 // Only setting new lower bounds. 1833 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1834 inputStrides, rewriter); 1835 } 1836 1837 mlir::Location loc = rebox.getLoc(); 1838 // Strides from the fir.box are in bytes. 1839 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1840 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1841 1842 llvm::SmallVector<mlir::Value> newStrides; 1843 llvm::SmallVector<mlir::Value> newExtents; 1844 mlir::Type idxTy = lowerTy().indexType(); 1845 // First stride from input box is kept. The rest is assumed contiguous 1846 // (it is not possible to reshape otherwise). If the input is scalar, 1847 // which may be OK if all new extents are ones, the stride does not 1848 // matter, use one. 1849 mlir::Value stride = inputStrides.empty() 1850 ? genConstantIndex(loc, idxTy, rewriter, 1) 1851 : inputStrides[0]; 1852 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1853 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1854 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1855 newExtents.emplace_back(extent); 1856 newStrides.emplace_back(stride); 1857 // nextStride = extent * stride; 1858 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1859 } 1860 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1861 rewriter); 1862 } 1863 1864 /// Return scalar element type of the input box. 1865 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1866 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1867 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1868 return seqTy.getEleTy(); 1869 return ty; 1870 } 1871 }; 1872 1873 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1874 /// TODO: Part of supporting Fortran 2003 procedure pointers.
1875 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1876 using FIROpConversion::FIROpConversion; 1877 1878 mlir::LogicalResult 1879 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1880 mlir::ConversionPatternRewriter &rewriter) const override { 1881 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1882 return mlir::failure(); 1883 } 1884 }; 1885 1886 // Code shared between insert_value and extract_value Ops. 1887 struct ValueOpCommon { 1888 // Translate the arguments pertaining to any multidimensional array to 1889 // row-major order for LLVM-IR. 1890 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1891 mlir::Type ty) { 1892 assert(ty && "type is null"); 1893 const auto end = attrs.size(); 1894 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1895 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1896 const auto dim = getDimension(seq); 1897 if (dim > 1) { 1898 auto ub = std::min(i + dim, end); 1899 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1900 i += dim - 1; 1901 } 1902 ty = getArrayElementType(seq); 1903 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1904 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1905 } else { 1906 llvm_unreachable("index into invalid type"); 1907 } 1908 } 1909 } 1910 1911 static llvm::SmallVector<mlir::Attribute> 1912 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1913 mlir::ArrayAttr arrAttr) { 1914 llvm::SmallVector<mlir::Attribute> attrs; 1915 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1916 if (i->isa<mlir::IntegerAttr>()) { 1917 attrs.push_back(*i); 1918 } else { 1919 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1920 ++i; 1921 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1922 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1923 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1924 } 1925 } 1926 return attrs; 1927 } 1928 1929 private: 1930 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1931 unsigned result = 1; 1932 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1933 eleTy; 1934 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1935 ++result; 1936 return result; 1937 } 1938 1939 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1940 auto eleTy = ty.getElementType(); 1941 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1942 eleTy = arrTy.getElementType(); 1943 return eleTy; 1944 } 1945 }; 1946 1947 namespace { 1948 /// Extract a subobject value from an ssa-value of aggregate type 1949 struct ExtractValueOpConversion 1950 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1951 public ValueOpCommon { 1952 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1953 1954 mlir::LogicalResult 1955 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1956 mlir::ConversionPatternRewriter &rewriter) const override { 1957 mlir::ValueRange operands = adaptor.getOperands(); 1958 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1959 toRowMajor(attrs, operands[0].getType()); 1960 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1961 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1962 extractVal, ty, operands[0], position); 1963 return mlir::success(); 1964 } 1965 }; 1966 1967 /// InsertValue is the generalized instruction for the composition of new 1968 /// aggregate type values. 
1969 struct InsertValueOpConversion 1970 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1971 public ValueOpCommon { 1972 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1973 1974 mlir::LogicalResult 1975 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1976 mlir::ConversionPatternRewriter &rewriter) const override { 1977 mlir::ValueRange operands = adaptor.getOperands(); 1978 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1979 toRowMajor(attrs, operands[0].getType()); 1980 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1981 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1982 insertVal, ty, operands[0], operands[1], position); 1983 return mlir::success(); 1984 } 1985 }; 1986 1987 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1988 struct InsertOnRangeOpConversion 1989 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1990 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1991 1992 // Increments an array of subscripts in a row-major fashion. 1993 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1994 llvm::SmallVector<uint64_t> &subscripts) const { 1995 for (size_t i = dims.size(); i > 0; --i) { 1996 if (++subscripts[i - 1] < dims[i - 1]) { 1997 return; 1998 } 1999 subscripts[i - 1] = 0; 2000 } 2001 } 2002 2003 mlir::LogicalResult 2004 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2005 mlir::ConversionPatternRewriter &rewriter) const override { 2006 2007 llvm::SmallVector<uint64_t> dims; 2008 auto type = adaptor.getOperands()[0].getType(); 2009 2010 // Iteratively extract the array dimensions from the type. 2011 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2012 dims.push_back(t.getNumElements()); 2013 type = t.getElementType(); 2014 } 2015 2016 llvm::SmallVector<std::uint64_t> lBounds; 2017 llvm::SmallVector<std::uint64_t> uBounds; 2018 2019 // Unzip the upper and lower bound and convert to a row-major format. 2020 mlir::DenseIntElementsAttr coor = range.getCoor(); 2021 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2022 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2023 uBounds.push_back(*i++); 2024 lBounds.push_back(*i); 2025 } 2026 2027 auto &subscripts = lBounds; 2028 auto loc = range.getLoc(); 2029 mlir::Value lastOp = adaptor.getOperands()[0]; 2030 mlir::Value insertVal = adaptor.getOperands()[1]; 2031 2032 auto i64Ty = rewriter.getI64Type(); 2033 while (subscripts != uBounds) { 2034 // Convert uint64_t's to Attribute's. 2035 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2036 for (const auto &subscript : subscripts) 2037 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2038 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2039 loc, ty, lastOp, insertVal, 2040 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2041 2042 incrementSubscripts(dims, subscripts); 2043 } 2044 2045 // Convert uint64_t's to Attribute's.
2046 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2047 for (const auto &subscript : subscripts) 2048 subscriptAttrs.push_back( 2049 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2050 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2051 2052 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2053 range, ty, lastOp, insertVal, 2054 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2055 2056 return mlir::success(); 2057 } 2058 }; 2059 } // namespace 2060 2061 namespace { 2062 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2063 /// shifted etc. array. 2064 /// (See the static restriction on coordinate_of.) array_coor determines the 2065 /// coordinate (location) of a specific element. 2066 struct XArrayCoorOpConversion 2067 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2068 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2069 2070 mlir::LogicalResult 2071 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2072 mlir::ConversionPatternRewriter &rewriter) const override { 2073 auto loc = coor.getLoc(); 2074 mlir::ValueRange operands = adaptor.getOperands(); 2075 unsigned rank = coor.getRank(); 2076 assert(coor.indices().size() == rank); 2077 assert(coor.shape().empty() || coor.shape().size() == rank); 2078 assert(coor.shift().empty() || coor.shift().size() == rank); 2079 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2080 mlir::Type idxTy = lowerTy().indexType(); 2081 unsigned indexOffset = coor.indicesOffset(); 2082 unsigned shapeOffset = coor.shapeOffset(); 2083 unsigned shiftOffset = coor.shiftOffset(); 2084 unsigned sliceOffset = coor.sliceOffset(); 2085 auto sliceOps = coor.slice().begin(); 2086 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2087 mlir::Value prevExt = one; 2088 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2089 mlir::Value offset = zero; 2090 const bool isShifted = !coor.shift().empty(); 2091 const bool isSliced = !coor.slice().empty(); 2092 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2093 2094 // For each dimension of the array, generate the offset calculation. 2095 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset, 2096 ++shiftOffset, sliceOffset += 3, sliceOps += 3) { 2097 mlir::Value index = 2098 integerCast(loc, rewriter, idxTy, operands[indexOffset]); 2099 mlir::Value lb = 2100 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset]) 2101 : one; 2102 mlir::Value step = one; 2103 bool normalSlice = isSliced; 2104 // Compute zero based index in dimension i of the element, applying 2105 // potential triplets and lower bounds. 2106 if (isSliced) { 2107 mlir::Value originalUb = *(sliceOps + 1); 2108 normalSlice = 2109 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp()); 2110 if (normalSlice) 2111 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]); 2112 } 2113 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2114 mlir::Value diff = 2115 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2116 if (normalSlice) { 2117 mlir::Value sliceLb = 2118 integerCast(loc, rewriter, idxTy, operands[sliceOffset]); 2119 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2120 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2121 } 2122 // Update the offset given the stride and the zero based index `diff` 2123 // that was just computed. 
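// Illustration (hypothetical): for a contiguous, unboxed A(10,20) addressed at A(i,j)
// with unit lower bounds and no slice, the accumulation below yields
// offset = (i-1) + (j-1)*10 in element units; for a boxed base, the per-dimension byte
// strides read from the descriptor are used instead, so the offset is in bytes.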
2124 if (baseIsBoxed) { 2125 // Use stride in bytes from the descriptor. 2126 mlir::Value stride = loadStrideFromBox(loc, operands[0], i, rewriter); 2127 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2128 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2129 } else { 2130 // Use stride computed at last iteration. 2131 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2132 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2133 // Compute next stride assuming contiguity of the base array 2134 // (in element number). 2135 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]); 2136 prevExt = 2137 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2138 } 2139 } 2140 2141 // Add computed offset to the base address. 2142 if (baseIsBoxed) { 2143 // Working with byte offsets. The base address is read from the fir.box 2144 // and needs to be cast to i8* to do the pointer arithmetic. 2145 mlir::Type baseTy = getBaseAddrTypeFromBox(operands[0].getType()); 2146 mlir::Value base = 2147 loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter); 2148 mlir::Type voidPtrTy = getVoidPtrType(); 2149 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2150 llvm::SmallVector<mlir::Value> args{offset}; 2151 auto addr = 2152 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2153 if (coor.subcomponent().empty()) { 2154 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, addr); 2155 return mlir::success(); 2156 } 2157 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2158 args.clear(); 2159 args.push_back(zero); 2160 if (!coor.lenParams().empty()) { 2161 // If type parameters are present, then we don't want to use a GEPOp 2162 // as below, as the LLVM struct type cannot be statically defined. 2163 TODO(loc, "derived type with type parameters"); 2164 } 2165 // TODO: array offset subcomponents must be converted to LLVM's 2166 // row-major layout here. 2167 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2168 args.push_back(operands[i]); 2169 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, casted, args); 2170 return mlir::success(); 2171 } 2172 2173 // The array was not boxed, so it must be contiguous. offset is therefore an 2174 // element offset and the base type is kept in the GEP unless the element 2175 // type size is itself dynamic. 2176 mlir::Value base; 2177 if (coor.subcomponent().empty()) { 2178 // No subcomponent. 2179 if (!coor.lenParams().empty()) { 2180 // Type parameters. Adjust element size explicitly. 2181 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2182 assert(eleTy && "result must be a reference-like type"); 2183 if (fir::characterWithDynamicLen(eleTy)) { 2184 assert(coor.lenParams().size() == 1); 2185 auto length = integerCast(loc, rewriter, idxTy, 2186 operands[coor.lenParamsOffset()]); 2187 offset = 2188 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, length); 2189 2190 } else { 2191 TODO(loc, "compute size of derived type with type parameters"); 2192 } 2193 } 2194 // Cast the base address to a pointer to T. 2195 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, operands[0]); 2196 } else { 2197 // Operand #0 must have a pointer type. For subcomponent slicing, we 2198 // want to cast away the array type and have a plain struct type.
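// Schematic (assumed LLVM types, for illustration only): a base of type
// !llvm.ptr<array<10 x array<20 x struct<(f32, f32)>>>> would be bitcast below to
// !llvm.ptr<struct<(f32, f32)>> so the following GEP can address the subcomponent.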
2199 mlir::Type ty0 = operands[0].getType(); 2200 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2201 assert(ptrTy && "expected pointer type"); 2202 mlir::Type eleTy = ptrTy.getElementType(); 2203 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2204 eleTy = arrTy.getElementType(); 2205 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2206 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, operands[0]); 2207 } 2208 llvm::SmallVector<mlir::Value> args = {offset}; 2209 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2210 args.push_back(operands[i]); 2211 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2212 return mlir::success(); 2213 } 2214 }; 2215 } // namespace 2216 2217 /// Convert a (memory) reference to a reference to a subobject. 2218 /// The coordinate_of op is a Swiss army knife operation that can be used on 2219 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2220 /// With unboxed arrays, there is the restriction that the array have a static 2221 /// shape in all but the last column. 2222 struct CoordinateOpConversion 2223 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2224 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2225 2226 mlir::LogicalResult 2227 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2228 mlir::ConversionPatternRewriter &rewriter) const override { 2229 mlir::ValueRange operands = adaptor.getOperands(); 2230 2231 mlir::Location loc = coor.getLoc(); 2232 mlir::Value base = operands[0]; 2233 mlir::Type baseObjectTy = coor.getBaseType(); 2234 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2235 assert(objectTy && "fir.coordinate_of expects a reference type"); 2236 2237 // Complex type - basically, extract the real or imaginary part 2238 if (fir::isa_complex(objectTy)) { 2239 mlir::LLVM::ConstantOp c0 = 2240 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2241 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2242 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2243 rewriter.replaceOp(coor, gep); 2244 return mlir::success(); 2245 } 2246 2247 // Boxed type - get the base pointer from the box 2248 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2249 return doRewriteBox(coor, ty, operands, loc, rewriter); 2250 2251 // Reference, pointer or a heap type 2252 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2253 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2254 2255 return rewriter.notifyMatchFailure( 2256 coor, "fir.coordinate_of base operand has unsupported type"); 2257 } 2258 2259 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2260 return fir::hasDynamicSize(ty) 2261 ?
op.getDefiningOp() 2262 ->getAttrOfType<mlir::IntegerAttr>("field") 2263 .getInt() 2264 : getIntValue(op); 2265 } 2266 2267 static int64_t getIntValue(mlir::Value val) { 2268 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2269 mlir::Operation *defop = val.getDefiningOp(); 2270 2271 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2272 return constOp.value(); 2273 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2274 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2275 return attr.getValue().getSExtValue(); 2276 fir::emitFatalError(val.getLoc(), "must be a constant"); 2277 } 2278 2279 static bool hasSubDimensions(mlir::Type type) { 2280 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2281 } 2282 2283 /// Check whether this form of `!fir.coordinate_of` is supported. These 2284 /// additional checks are required, because we are not yet able to convert 2285 /// all valid forms of `!fir.coordinate_of`. 2286 /// TODO: Either implement the unsupported cases or extend the verifier 2287 /// in FIROps.cpp instead. 2288 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2289 const std::size_t numOfCoors = coors.size(); 2290 std::size_t i = 0; 2291 bool subEle = false; 2292 bool ptrEle = false; 2293 for (; i < numOfCoors; ++i) { 2294 mlir::Value nxtOpnd = coors[i]; 2295 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2296 subEle = true; 2297 i += arrTy.getDimension() - 1; 2298 type = arrTy.getEleTy(); 2299 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2300 subEle = true; 2301 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2302 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2303 subEle = true; 2304 type = tupTy.getType(getIntValue(nxtOpnd)); 2305 } else { 2306 ptrEle = true; 2307 } 2308 } 2309 if (ptrEle) 2310 return (!subEle) && (numOfCoors == 1); 2311 return subEle && (i >= numOfCoors); 2312 } 2313 2314 /// Walk the abstract memory layout and determine if the path traverses any 2315 /// array types with unknown shape. Return true iff all the array types have a 2316 /// constant shape along the path. 2317 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2318 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2319 mlir::Value nxtOpnd = coors[i]; 2320 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2321 if (fir::sequenceWithNonConstantShape(arrTy)) 2322 return false; 2323 i += arrTy.getDimension() - 1; 2324 type = arrTy.getEleTy(); 2325 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2326 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2327 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2328 type = strTy.getType(getIntValue(nxtOpnd)); 2329 } else { 2330 return true; 2331 } 2332 } 2333 return true; 2334 } 2335 2336 private: 2337 mlir::LogicalResult 2338 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2339 mlir::Location loc, 2340 mlir::ConversionPatternRewriter &rewriter) const { 2341 mlir::Type boxObjTy = coor.getBaseType(); 2342 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2343 2344 mlir::Value boxBaseAddr = operands[0]; 2345 2346 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2347 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>> 2348 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2349 // %addr = coordinate_of %box, %lenp 2350 if (coor.getNumOperands() == 2) { 2351 mlir::Operation *coordinateDef = 2352 (*coor.getCoor().begin()).getDefiningOp(); 2353 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2354 TODO(loc, 2355 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2356 } 2357 2358 // 2. GENERAL CASE: 2359 // 2.1. (`fir.array`) 2360 // %box = ... : !fir.box<!fir.array<?xU>> 2361 // %idx = ... : index 2362 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2363 // 2.2 (`fir.derived`) 2364 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>> 2365 // %idx = ... : i32 2366 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2367 // 2.3 (`fir.derived` inside `fir.array`) 2368 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2369 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2370 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2371 // 2.4. TODO: Either document or disable any other case that the following 2372 // implementation might convert. 2373 mlir::LLVM::ConstantOp c0 = 2374 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2375 mlir::Value resultAddr = 2376 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2377 boxBaseAddr, rewriter); 2378 // Component Type 2379 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2380 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2381 2382 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2383 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2384 if (i != 1) 2385 TODO(loc, "fir.array nested inside other array and/or derived type"); 2386 // Applies byte strides from the box. Ignore lower bound from box 2387 // since fir.coordinate_of indexes are zero based. Lowering takes care 2388 // of lower bound aspects. This both accounts for dynamically sized 2389 // types and non-contiguous arrays.
2390 auto idxTy = lowerTy().indexType(); 2391 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2392 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2393 index < lastIndex; ++index) { 2394 mlir::Value stride = 2395 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2396 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2397 operands[index], stride); 2398 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2399 } 2400 auto voidPtrBase = 2401 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2402 llvm::SmallVector<mlir::Value> args = {off}; 2403 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2404 voidPtrBase, args); 2405 i += arrTy.getDimension() - 1; 2406 cpnTy = arrTy.getEleTy(); 2407 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2408 auto recRefTy = 2409 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2410 mlir::Value nxtOpnd = operands[i]; 2411 auto memObj = 2412 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2413 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2414 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2415 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2416 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2417 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2418 args); 2419 resultAddr = 2420 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2421 } else { 2422 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2423 } 2424 } 2425 2426 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2427 return mlir::success(); 2428 } 2429 2430 mlir::LogicalResult 2431 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2432 mlir::ValueRange operands, mlir::Location loc, 2433 mlir::ConversionPatternRewriter &rewriter) const { 2434 mlir::Type baseObjectTy = coor.getBaseType(); 2435 2436 // Component Type 2437 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2438 bool hasSubdimension = hasSubDimensions(cpnTy); 2439 bool columnIsDeferred = !hasSubdimension; 2440 2441 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2442 TODO(loc, "unsupported combination of coordinate operands"); 2443 2444 const bool hasKnownShape = 2445 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2446 2447 // If only the column is `?`, then we can simply place the column value in 2448 // the 0-th GEP position. 
2449 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2450 if (!hasKnownShape) { 2451 const unsigned sz = arrTy.getDimension(); 2452 if (arraysHaveKnownShape(arrTy.getEleTy(), 2453 operands.drop_front(1 + sz))) { 2454 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2455 bool allConst = true; 2456 for (unsigned i = 0; i < sz - 1; ++i) { 2457 if (shape[i] < 0) { 2458 allConst = false; 2459 break; 2460 } 2461 } 2462 if (allConst) 2463 columnIsDeferred = true; 2464 } 2465 } 2466 } 2467 2468 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2469 return mlir::emitError( 2470 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2471 2472 if (hasKnownShape || columnIsDeferred) { 2473 llvm::SmallVector<mlir::Value> offs; 2474 if (hasKnownShape && hasSubdimension) { 2475 mlir::LLVM::ConstantOp c0 = 2476 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2477 offs.push_back(c0); 2478 } 2479 llvm::Optional<int> dims; 2480 llvm::SmallVector<mlir::Value> arrIdx; 2481 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2482 mlir::Value nxtOpnd = operands[i]; 2483 2484 if (!cpnTy) 2485 return mlir::emitError(loc, "invalid coordinate/check failed"); 2486 2487 // check if the i-th coordinate relates to an array 2488 if (dims) { 2489 arrIdx.push_back(nxtOpnd); 2490 int dimsLeft = *dims; 2491 if (dimsLeft > 1) { 2492 dims = dimsLeft - 1; 2493 continue; 2494 } 2495 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2496 // append array range in reverse (FIR arrays are column-major) 2497 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2498 arrIdx.clear(); 2499 dims.reset(); 2500 continue; 2501 } 2502 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2503 int d = arrTy.getDimension() - 1; 2504 if (d > 0) { 2505 dims = d; 2506 arrIdx.push_back(nxtOpnd); 2507 continue; 2508 } 2509 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2510 offs.push_back(nxtOpnd); 2511 continue; 2512 } 2513 2514 // check if the i-th coordinate relates to a field 2515 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2516 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2517 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2518 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2519 else 2520 cpnTy = nullptr; 2521 2522 offs.push_back(nxtOpnd); 2523 } 2524 if (dims) 2525 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2526 mlir::Value base = operands[0]; 2527 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2528 rewriter.replaceOp(coor, retval); 2529 return mlir::success(); 2530 } 2531 2532 return mlir::emitError( 2533 loc, "fir.coordinate_of base operand has unsupported type"); 2534 } 2535 }; 2536 2537 /// Convert `fir.field_index`. The conversion depends on whether the size of 2538 /// the record is static or dynamic. 2539 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2540 using FIROpConversion::FIROpConversion; 2541 2542 // NB: most field references should be resolved by this point 2543 mlir::LogicalResult 2544 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2545 mlir::ConversionPatternRewriter &rewriter) const override { 2546 auto recTy = field.getOnType().cast<fir::RecordType>(); 2547 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2548 2549 if (!fir::hasDynamicSize(recTy)) { 2550 // Derived type has compile-time constant layout. Return index of the 2551 // component type in the parent type (to be used in GEP). 
2552 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset( 2553 field.getLoc(), rewriter, index)}); 2554 return mlir::success(); 2555 } 2556 2557 // Derived type has a dynamic layout (e.g. length type parameters). Call the 2558 // compiler generated function to determine the byte offset of the field at 2559 // runtime. This returns a non-constant. 2560 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get( 2561 field.getContext(), getOffsetMethodName(recTy, field.getFieldId())); 2562 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr); 2563 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr( 2564 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index)); 2565 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 2566 field, lowerTy().offsetType(), adaptor.getOperands(), 2567 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr}); 2568 return mlir::success(); 2569 } 2570 2571 // Reconstruct the name of the compiler generated method that calculates the 2572 // offset. 2573 inline static std::string getOffsetMethodName(fir::RecordType recTy, 2574 llvm::StringRef field) { 2575 return recTy.getName().str() + "P." + field.str() + ".offset"; 2576 } 2577 }; 2578 2579 /// Convert `fir.end` 2580 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 2581 using FIROpConversion::FIROpConversion; 2582 2583 mlir::LogicalResult 2584 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 2585 mlir::ConversionPatternRewriter &rewriter) const override { 2586 TODO(firEnd.getLoc(), "fir.end codegen"); 2587 return mlir::failure(); 2588 } 2589 }; 2590 2591 /// Lower `fir.gentypedesc` to a global constant. 2592 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2593 using FIROpConversion::FIROpConversion; 2594 2595 mlir::LogicalResult 2596 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2597 mlir::ConversionPatternRewriter &rewriter) const override { 2598 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2599 return mlir::failure(); 2600 } 2601 }; 2602 2603 /// Lower `fir.has_value` operation to `llvm.return` operation. 2604 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2605 using FIROpConversion::FIROpConversion; 2606 2607 mlir::LogicalResult 2608 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2609 mlir::ConversionPatternRewriter &rewriter) const override { 2610 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2611 adaptor.getOperands()); 2612 return mlir::success(); 2613 } 2614 }; 2615 2616 /// Lower `fir.global` operation to `llvm.global` operation. 2617 /// `fir.insert_on_range` operations are replaced with a constant dense attribute 2618 /// if they are applied on the full range.
2619 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2620 using FIROpConversion::FIROpConversion; 2621 2622 mlir::LogicalResult 2623 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2624 mlir::ConversionPatternRewriter &rewriter) const override { 2625 auto tyAttr = convertType(global.getType()); 2626 if (global.getType().isa<fir::BoxType>()) 2627 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2628 auto loc = global.getLoc(); 2629 mlir::Attribute initAttr; 2630 if (global.getInitVal()) 2631 initAttr = global.getInitVal().getValue(); 2632 auto linkage = convertLinkage(global.getLinkName()); 2633 auto isConst = global.getConstant().hasValue(); 2634 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2635 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2636 auto &gr = g.getInitializerRegion(); 2637 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2638 if (!gr.empty()) { 2639 // Replace insert_on_range with a constant dense attribute if the 2640 // initialization is on the full range. 2641 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2642 for (auto insertOp : insertOnRangeOps) { 2643 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2644 auto seqTyAttr = convertType(insertOp.getType()); 2645 auto *op = insertOp.getVal().getDefiningOp(); 2646 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2647 if (!constant) { 2648 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2649 if (!convertOp) 2650 continue; 2651 constant = mlir::cast<mlir::arith::ConstantOp>( 2652 convertOp.getValue().getDefiningOp()); 2653 } 2654 mlir::Type vecType = mlir::VectorType::get( 2655 insertOp.getType().getShape(), constant.getType()); 2656 auto denseAttr = mlir::DenseElementsAttr::get( 2657 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2658 rewriter.setInsertionPointAfter(insertOp); 2659 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2660 insertOp, seqTyAttr, denseAttr); 2661 } 2662 } 2663 } 2664 rewriter.eraseOp(global); 2665 return mlir::success(); 2666 } 2667 2668 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2669 fir::SequenceType seqTy) const { 2670 auto extents = seqTy.getShape(); 2671 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2672 return false; 2673 auto cur_index = indexes.value_begin<int64_t>(); 2674 for (unsigned i = 0; i < indexes.size(); i += 2) { 2675 if (*(cur_index++) != 0) 2676 return false; 2677 if (*(cur_index++) != extents[i / 2] - 1) 2678 return false; 2679 } 2680 return true; 2681 } 2682 2683 // TODO: String comparison should be avoided. Replace linkName with an 2684 // enumeration.
2685 mlir::LLVM::Linkage 2686 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2687 if (optLinkage.hasValue()) { 2688 auto name = optLinkage.getValue(); 2689 if (name == "internal") 2690 return mlir::LLVM::Linkage::Internal; 2691 if (name == "linkonce") 2692 return mlir::LLVM::Linkage::Linkonce; 2693 if (name == "linkonce_odr") 2694 return mlir::LLVM::Linkage::LinkonceODR; 2695 if (name == "common") 2696 return mlir::LLVM::Linkage::Common; 2697 if (name == "weak") 2698 return mlir::LLVM::Linkage::Weak; 2699 } 2700 return mlir::LLVM::Linkage::External; 2701 } 2702 }; 2703 2704 /// `fir.load` --> `llvm.load` 2705 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2706 using FIROpConversion::FIROpConversion; 2707 2708 mlir::LogicalResult 2709 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2710 mlir::ConversionPatternRewriter &rewriter) const override { 2711 // fir.box is a special case because it is considered an SSA value in 2712 // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2713 // and fir.box end up being the same LLVM type and loading a 2714 // fir.ref<fir.box> is actually a no-op in LLVM. 2715 if (load.getType().isa<fir::BoxType>()) { 2716 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2717 } else { 2718 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2719 load, convertType(load.getType()), adaptor.getOperands(), 2720 load->getAttrs()); 2721 } 2722 return mlir::success(); 2723 } 2724 }; 2725 2726 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2727 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2728 /// math flags? 2729 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2730 using FIROpConversion::FIROpConversion; 2731 2732 mlir::LogicalResult 2733 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2734 mlir::ConversionPatternRewriter &rewriter) const override { 2735 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2736 return mlir::success(); 2737 } 2738 }; 2739 2740 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2741 llvm::Optional<mlir::ValueRange> destOps, 2742 mlir::ConversionPatternRewriter &rewriter, 2743 mlir::Block *newBlock) { 2744 if (destOps.hasValue()) 2745 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2746 newBlock, mlir::ValueRange()); 2747 else 2748 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2749 } 2750 2751 template <typename A, typename B> 2752 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2753 mlir::ConversionPatternRewriter &rewriter) { 2754 if (destOps.hasValue()) 2755 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2756 dest); 2757 else 2758 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2759 } 2760 2761 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2762 mlir::Block *dest, 2763 llvm::Optional<mlir::ValueRange> destOps, 2764 mlir::ConversionPatternRewriter &rewriter) { 2765 auto *thisBlock = rewriter.getInsertionBlock(); 2766 auto *newBlock = createBlock(rewriter, dest); 2767 rewriter.setInsertionPointToEnd(thisBlock); 2768 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2769 rewriter.setInsertionPointToEnd(newBlock); 2770 } 2771 2772 /// Conversion of `fir.select_case` 2773 /// 2774 /// The `fir.select_case` operation is converted to an if-then-else ladder.
2775 /// Depending on the case condition type, one or several comparisons and
2776 /// conditional branches may be generated.
2777 ///
2778 /// A point value case such as `case(4)`, a lower bound case such as
2779 /// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
2780 /// simple comparison between the selector value and the constant value in the
2781 /// case. The block associated with the case condition is then executed if
2782 /// the comparison succeeds; otherwise it branches to the next block with the
2783 /// comparison for the next case condition.
2784 ///
2785 /// A closed interval case condition such as `case(7:10)` is converted with a
2786 /// first comparison and conditional branch for the lower bound. If
2787 /// successful, it branches to a second block with the comparison for the
2788 /// upper bound of the same case condition.
2789 ///
2790 /// TODO: lowering of CHARACTER type cases is not handled yet.
2791 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
2792   using FIROpConversion::FIROpConversion;
2793 
2794   mlir::LogicalResult
2795   matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
2796                   mlir::ConversionPatternRewriter &rewriter) const override {
2797     unsigned conds = caseOp.getNumConditions();
2798     llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
2799     // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
2800     auto ty = caseOp.getSelector().getType();
2801     if (ty.isa<fir::CharacterType>()) {
2802       TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
2803       return mlir::failure();
2804     }
2805     mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
2806     auto loc = caseOp.getLoc();
2807     for (unsigned t = 0; t != conds; ++t) {
2808       mlir::Block *dest = caseOp.getSuccessor(t);
2809       llvm::Optional<mlir::ValueRange> destOps =
2810           caseOp.getSuccessorOperands(adaptor.getOperands(), t);
2811       llvm::Optional<mlir::ValueRange> cmpOps =
2812           *caseOp.getCompareOperands(adaptor.getOperands(), t);
2813       mlir::Value caseArg = *(cmpOps.value().begin());
2814       mlir::Attribute attr = cases[t];
2815       if (attr.isa<fir::PointIntervalAttr>()) {
2816         auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2817             loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
2818         genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2819         continue;
2820       }
2821       if (attr.isa<fir::LowerBoundAttr>()) {
2822         auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2823             loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2824         genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2825         continue;
2826       }
2827       if (attr.isa<fir::UpperBoundAttr>()) {
2828         auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2829             loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
2830         genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2831         continue;
2832       }
2833       if (attr.isa<fir::ClosedIntervalAttr>()) {
2834         auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2835             loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2836         auto *thisBlock = rewriter.getInsertionBlock();
2837         auto *newBlock1 = createBlock(rewriter, dest);
2838         auto *newBlock2 = createBlock(rewriter, dest);
2839         rewriter.setInsertionPointToEnd(thisBlock);
2840         rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
2841         rewriter.setInsertionPointToEnd(newBlock1);
2842         mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
2843         auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
2844             loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
2845         genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
2846         rewriter.setInsertionPointToEnd(newBlock2);
2847         continue;
2848       }
2849       assert(attr.isa<mlir::UnitAttr>());
2850       assert((t + 1 == conds) && "unit must be last");
2851       genBrOp(caseOp, dest, destOps, rewriter);
2852     }
2853     return mlir::success();
2854   }
2855 };
2856 
2857 template <typename OP>
2858 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
2859                                   typename OP::Adaptor adaptor,
2860                                   mlir::ConversionPatternRewriter &rewriter) {
2861   unsigned conds = select.getNumConditions();
2862   auto cases = select.getCases().getValue();
2863   mlir::Value selector = adaptor.getSelector();
2864   auto loc = select.getLoc();
2865   assert(conds > 0 && "select must have cases");
2866 
2867   llvm::SmallVector<mlir::Block *> destinations;
2868   llvm::SmallVector<mlir::ValueRange> destinationsOperands;
2869   mlir::Block *defaultDestination;
2870   mlir::ValueRange defaultOperands;
2871   llvm::SmallVector<int32_t> caseValues;
2872 
2873   for (unsigned t = 0; t != conds; ++t) {
2874     mlir::Block *dest = select.getSuccessor(t);
2875     auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
2876     const mlir::Attribute &attr = cases[t];
2877     if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
2878       destinations.push_back(dest);
2879       destinationsOperands.push_back(destOps.hasValue() ? *destOps
2880                                                         : mlir::ValueRange{});
2881       caseValues.push_back(intAttr.getInt());
2882       continue;
2883     }
2884     assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
2885     assert((t + 1 == conds) && "unit must be last");
2886     defaultDestination = dest;
2887     defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
2888   }
2889 
2890   // LLVM::SwitchOp takes an i32 type for the selector.
2891   if (select.getSelector().getType() != rewriter.getI32Type())
2892     selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
2893                                                     selector);
2894 
2895   rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
2896       select, selector,
2897       /*defaultDestination=*/defaultDestination,
2898       /*defaultOperands=*/defaultOperands,
2899       /*caseValues=*/caseValues,
2900       /*caseDestinations=*/destinations,
2901       /*caseOperands=*/destinationsOperands,
2902       /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
2903 }
2904 
2905 /// Conversion of fir::SelectOp to an llvm.switch on the selector value.
2906 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
2907   using FIROpConversion::FIROpConversion;
2908 
2909   mlir::LogicalResult
2910   matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
2911                   mlir::ConversionPatternRewriter &rewriter) const override {
2912     selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
2913     return mlir::success();
2914   }
2915 };
2916 
2917 /// Conversion of fir::SelectRankOp to an llvm.switch on the selected rank.
2918 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
2919   using FIROpConversion::FIROpConversion;
2920 
2921   mlir::LogicalResult
2922   matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
2923                   mlir::ConversionPatternRewriter &rewriter) const override {
2924     selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
2925     return mlir::success();
2926   }
2927 };
2928 
2929 /// Lower `fir.select_type` to LLVM IR dialect.
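/// This operation is expected to have been rewritten to simpler control flow
/// before codegen, so the pattern below only emits an error stating that the
/// operation should already have been converted, and then fails the match.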
2930 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2931 using FIROpConversion::FIROpConversion; 2932 2933 mlir::LogicalResult 2934 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2935 mlir::ConversionPatternRewriter &rewriter) const override { 2936 mlir::emitError(select.getLoc(), 2937 "fir.select_type should have already been converted"); 2938 return mlir::failure(); 2939 } 2940 }; 2941 2942 /// `fir.store` --> `llvm.store` 2943 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2944 using FIROpConversion::FIROpConversion; 2945 2946 mlir::LogicalResult 2947 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2948 mlir::ConversionPatternRewriter &rewriter) const override { 2949 if (store.getValue().getType().isa<fir::BoxType>()) { 2950 // fir.box value is actually in memory, load it first before storing it. 2951 mlir::Location loc = store.getLoc(); 2952 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2953 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2954 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2955 adaptor.getOperands()[0]); 2956 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2957 store, val, adaptor.getOperands()[1]); 2958 } else { 2959 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2960 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2961 } 2962 return mlir::success(); 2963 } 2964 }; 2965 2966 namespace { 2967 2968 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2969 /// the character buffer and one for the buffer length. 2970 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2971 using FIROpConversion::FIROpConversion; 2972 2973 mlir::LogicalResult 2974 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2975 mlir::ConversionPatternRewriter &rewriter) const override { 2976 auto *ctx = unboxchar.getContext(); 2977 2978 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2979 mlir::Value tuple = adaptor.getOperands()[0]; 2980 mlir::Type tupleTy = tuple.getType(); 2981 2982 mlir::Location loc = unboxchar.getLoc(); 2983 mlir::Value ptrToBuffer = 2984 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2985 2986 mlir::LLVM::ExtractValueOp len = 2987 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2988 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2989 2990 rewriter.replaceOp(unboxchar, 2991 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2992 return mlir::success(); 2993 } 2994 }; 2995 2996 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2997 /// components. 2998 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
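/// Codegen for this operation is not implemented yet: the pattern reports a
/// TODO at the operation's location and fails the match.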
2999 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 3000 using FIROpConversion::FIROpConversion; 3001 3002 mlir::LogicalResult 3003 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 3004 mlir::ConversionPatternRewriter &rewriter) const override { 3005 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 3006 return mlir::failure(); 3007 } 3008 }; 3009 3010 /// convert to LLVM IR dialect `undef` 3011 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3012 using FIROpConversion::FIROpConversion; 3013 3014 mlir::LogicalResult 3015 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3016 mlir::ConversionPatternRewriter &rewriter) const override { 3017 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3018 undef, convertType(undef.getType())); 3019 return mlir::success(); 3020 } 3021 }; 3022 3023 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3024 using FIROpConversion::FIROpConversion; 3025 3026 mlir::LogicalResult 3027 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3028 mlir::ConversionPatternRewriter &rewriter) const override { 3029 mlir::Type ty = convertType(zero.getType()); 3030 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3031 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3032 } else if (ty.isa<mlir::IntegerType>()) { 3033 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3034 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3035 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3036 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3037 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3038 } else { 3039 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3040 return rewriter.notifyMatchFailure( 3041 zero, 3042 "conversion of fir.zero with aggregate type not implemented yet"); 3043 } 3044 return mlir::success(); 3045 } 3046 }; 3047 3048 /// `fir.unreachable` --> `llvm.unreachable` 3049 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3050 using FIROpConversion::FIROpConversion; 3051 3052 mlir::LogicalResult 3053 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3054 mlir::ConversionPatternRewriter &rewriter) const override { 3055 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3056 return mlir::success(); 3057 } 3058 }; 3059 3060 /// `fir.is_present` --> 3061 /// ``` 3062 /// %0 = llvm.mlir.constant(0 : i64) 3063 /// %1 = llvm.ptrtoint %0 3064 /// %2 = llvm.icmp "ne" %1, %0 : i64 3065 /// ``` 3066 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3067 using FIROpConversion::FIROpConversion; 3068 3069 mlir::LogicalResult 3070 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3071 mlir::ConversionPatternRewriter &rewriter) const override { 3072 mlir::Type idxTy = lowerTy().indexType(); 3073 mlir::Location loc = isPresent.getLoc(); 3074 auto ptr = adaptor.getOperands()[0]; 3075 3076 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3077 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3078 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3079 3080 mlir::Type ty = structTy.getBody()[0]; 3081 mlir::MLIRContext *ctx = isPresent.getContext(); 3082 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3083 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3084 } 3085 mlir::LLVM::ConstantOp c0 = 3086 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3087 auto addr = 
rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3088 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3089 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3090 3091 return mlir::success(); 3092 } 3093 }; 3094 3095 /// Create value signaling an absent optional argument in a call, e.g. 3096 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3097 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3098 using FIROpConversion::FIROpConversion; 3099 3100 mlir::LogicalResult 3101 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3102 mlir::ConversionPatternRewriter &rewriter) const override { 3103 mlir::Type ty = convertType(absent.getType()); 3104 mlir::Location loc = absent.getLoc(); 3105 3106 if (absent.getType().isa<fir::BoxCharType>()) { 3107 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3108 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3109 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3110 auto nullField = 3111 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3112 mlir::MLIRContext *ctx = absent.getContext(); 3113 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3114 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3115 absent, ty, undefStruct, nullField, c0); 3116 } else { 3117 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3118 } 3119 return mlir::success(); 3120 } 3121 }; 3122 3123 // 3124 // Primitive operations on Complex types 3125 // 3126 3127 /// Generate inline code for complex addition/subtraction 3128 template <typename LLVMOP, typename OPTY> 3129 static mlir::LLVM::InsertValueOp 3130 complexSum(OPTY sumop, mlir::ValueRange opnds, 3131 mlir::ConversionPatternRewriter &rewriter, 3132 fir::LLVMTypeConverter &lowering) { 3133 mlir::Value a = opnds[0]; 3134 mlir::Value b = opnds[1]; 3135 auto loc = sumop.getLoc(); 3136 auto ctx = sumop.getContext(); 3137 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3138 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3139 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3140 mlir::Type ty = lowering.convertType(sumop.getType()); 3141 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3142 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3143 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3144 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3145 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3146 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3147 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3148 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3149 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3150 } 3151 } // namespace 3152 3153 namespace { 3154 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3155 using FIROpConversion::FIROpConversion; 3156 3157 mlir::LogicalResult 3158 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3159 mlir::ConversionPatternRewriter &rewriter) const override { 3160 // given: (x + iy) + (x' + iy') 3161 // result: (x + x') + i(y + y') 3162 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3163 rewriter, lowerTy()); 3164 rewriter.replaceOp(addc, r.getResult()); 3165 return mlir::success(); 3166 } 3167 }; 3168 3169 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3170 using 
FIROpConversion::FIROpConversion; 3171 3172 mlir::LogicalResult 3173 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3174 mlir::ConversionPatternRewriter &rewriter) const override { 3175 // given: (x + iy) - (x' + iy') 3176 // result: (x - x') + i(y - y') 3177 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3178 rewriter, lowerTy()); 3179 rewriter.replaceOp(subc, r.getResult()); 3180 return mlir::success(); 3181 } 3182 }; 3183 3184 /// Inlined complex multiply 3185 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3186 using FIROpConversion::FIROpConversion; 3187 3188 mlir::LogicalResult 3189 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3190 mlir::ConversionPatternRewriter &rewriter) const override { 3191 // TODO: Can we use a call to __muldc3 ? 3192 // given: (x + iy) * (x' + iy') 3193 // result: (xx'-yy')+i(xy'+yx') 3194 mlir::Value a = adaptor.getOperands()[0]; 3195 mlir::Value b = adaptor.getOperands()[1]; 3196 auto loc = mulc.getLoc(); 3197 auto *ctx = mulc.getContext(); 3198 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3199 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3200 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3201 mlir::Type ty = convertType(mulc.getType()); 3202 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3203 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3204 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3205 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3206 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3207 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3208 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3209 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3210 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3211 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3212 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3213 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3214 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3215 rewriter.replaceOp(mulc, r0.getResult()); 3216 return mlir::success(); 3217 } 3218 }; 3219 3220 /// Inlined complex division 3221 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3222 using FIROpConversion::FIROpConversion; 3223 3224 mlir::LogicalResult 3225 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3226 mlir::ConversionPatternRewriter &rewriter) const override { 3227 // TODO: Can we use a call to __divdc3 instead? 3228 // Just generate inline code for now. 
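    // The formulas below follow from multiplying the numerator and the
    // denominator by the conjugate of the denominator:
    //   (x + iy) / (x' + iy') = ((x + iy)(x' - iy')) / (x'*x' + y'*y')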
3229     // given: (x + iy) / (x' + iy')
3230     // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
3231     mlir::Value a = adaptor.getOperands()[0];
3232     mlir::Value b = adaptor.getOperands()[1];
3233     auto loc = divc.getLoc();
3234     auto *ctx = divc.getContext();
3235     auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3236     auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
3237     mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
3238     mlir::Type ty = convertType(divc.getType());
3239     auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
3240     auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
3241     auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
3242     auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
3243     auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
3244     auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
3245     auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
3246     auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
3247     auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
3248     auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
3249     auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
3250     auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
3251     auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
3252     auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
3253     auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
3254     auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3255     auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
3256     auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
3257     rewriter.replaceOp(divc, r0.getResult());
3258     return mlir::success();
3259   }
3260 };
3261 
3262 /// Inlined complex negation
3263 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
3264   using FIROpConversion::FIROpConversion;
3265 
3266   mlir::LogicalResult
3267   matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
3268                   mlir::ConversionPatternRewriter &rewriter) const override {
3269     // given: -(x + iy)
3270     // result: -x - iy
3271     auto *ctxt = neg.getContext();
3272     auto eleTy = convertType(getComplexEleTy(neg.getType()));
3273     auto ty = convertType(neg.getType());
3274     auto loc = neg.getLoc();
3275     mlir::Value o0 = adaptor.getOperands()[0];
3276     auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
3277     auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
3278     auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
3279     auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
3280     auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
3281     auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
3282     auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
3283     rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
3284     return mlir::success();
3285   }
3286 };
3287 
3288 /// Conversion pattern for operations that must be dead. The information in
3289 /// these operations is used by other operations; at this point they should not
3290 /// have any more uses.
3291 /// These operations are normally dead after the pre-codegen pass.
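/// If such an op still has uses when this pattern runs, the conversion signals
/// a match failure instead of erasing it, so the leftover use surfaces as a
/// legalization error rather than a silently dropped value.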
3292 template <typename FromOp> 3293 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3294 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3295 const fir::FIRToLLVMPassOptions &options) 3296 : FIROpConversion<FromOp>(lowering, options) {} 3297 using OpAdaptor = typename FromOp::Adaptor; 3298 3299 mlir::LogicalResult 3300 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3301 mlir::ConversionPatternRewriter &rewriter) const final { 3302 if (!op->getUses().empty()) 3303 return rewriter.notifyMatchFailure(op, "op must be dead"); 3304 rewriter.eraseOp(op); 3305 return mlir::success(); 3306 } 3307 }; 3308 3309 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3310 using MustBeDeadConversion::MustBeDeadConversion; 3311 }; 3312 3313 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3314 using MustBeDeadConversion::MustBeDeadConversion; 3315 }; 3316 3317 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3318 using MustBeDeadConversion::MustBeDeadConversion; 3319 }; 3320 3321 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3322 using MustBeDeadConversion::MustBeDeadConversion; 3323 }; 3324 3325 } // namespace 3326 3327 namespace { 3328 /// Convert FIR dialect to LLVM dialect 3329 /// 3330 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3331 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3332 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3333 public: 3334 FIRToLLVMLowering() = default; 3335 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3336 mlir::ModuleOp getModule() { return getOperation(); } 3337 3338 void runOnOperation() override final { 3339 auto mod = getModule(); 3340 if (!forcedTargetTriple.empty()) 3341 fir::setTargetTriple(mod, forcedTargetTriple); 3342 3343 auto *context = getModule().getContext(); 3344 fir::LLVMTypeConverter typeConverter{getModule()}; 3345 mlir::RewritePatternSet pattern(context); 3346 pattern.insert< 3347 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3348 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3349 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3350 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3351 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3352 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3353 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3354 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3355 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3356 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3357 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3358 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3359 InsertValueOpConversion, IsPresentOpConversion, 3360 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3361 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3362 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3363 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3364 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3365 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3366 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3367 XEmboxOpConversion, XReboxOpConversion, 
ZeroOpConversion>(typeConverter, 3368 options); 3369 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern); 3370 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern); 3371 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3372 pattern); 3373 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3374 pattern); 3375 // Convert math-like dialect operations, which can be produced 3376 // when late math lowering mode is used, into llvm dialect. 3377 mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern); 3378 mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0); 3379 mlir::ConversionTarget target{*context}; 3380 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3381 // The OpenMP dialect is legal for Operations without regions, for those 3382 // which contains regions it is legal if the region contains only the 3383 // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and 3384 // legalize conversion of OpenMP operations without regions. 3385 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter); 3386 target.addLegalDialect<mlir::omp::OpenMPDialect>(); 3387 3388 // required NOPs for applying a full conversion 3389 target.addLegalOp<mlir::ModuleOp>(); 3390 3391 // apply the patterns 3392 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3393 std::move(pattern)))) { 3394 signalPassFailure(); 3395 } 3396 } 3397 3398 private: 3399 fir::FIRToLLVMPassOptions options; 3400 }; 3401 3402 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3403 struct LLVMIRLoweringPass 3404 : public mlir::PassWrapper<LLVMIRLoweringPass, 3405 mlir::OperationPass<mlir::ModuleOp>> { 3406 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass) 3407 3408 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p) 3409 : output{output}, printer{p} {} 3410 3411 mlir::ModuleOp getModule() { return getOperation(); } 3412 3413 void runOnOperation() override final { 3414 auto *ctx = getModule().getContext(); 3415 auto optName = getModule().getName(); 3416 llvm::LLVMContext llvmCtx; 3417 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3418 getModule(), llvmCtx, optName ? *optName : "FIRModule")) { 3419 printer(*llvmModule, output); 3420 return; 3421 } 3422 3423 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3424 signalPassFailure(); 3425 } 3426 3427 private: 3428 llvm::raw_ostream &output; 3429 fir::LLVMIRLoweringPrinter printer; 3430 }; 3431 3432 } // namespace 3433 3434 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3435 return std::make_unique<FIRToLLVMLowering>(); 3436 } 3437 3438 std::unique_ptr<mlir::Pass> 3439 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) { 3440 return std::make_unique<FIRToLLVMLowering>(options); 3441 } 3442 3443 std::unique_ptr<mlir::Pass> 3444 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output, 3445 fir::LLVMIRLoweringPrinter printer) { 3446 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3447 } 3448
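// Illustrative use of the factory functions above. The real pipeline setup
// lives in the Flang/tco drivers, not in this file; this is only a sketch of
// how the pass is typically added to a pass manager:
//
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   (void)pm.run(module); // check the returned LogicalResult in real code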