//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MathToLLVM/MathToLLVM.h"
#include "mlir/Conversion/MathToLibm/MathToLibm.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value. Lowering
  /// to the specific target may involve some sign-extending or truncation of
  /// values, particularly to fit them from abstract box types to the
  /// appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
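  /// The lowered box is a pointer to the CFI descriptor struct, so the access
  /// is a GEP at indices {0, boxValue} followed by a load. Roughly (a sketch;
  /// SSA names and exact type syntax are illustrative only):
  ///   %p = llvm.getelementptr %box[%c0, %cpos] : ... -> !llvm.ptr<resultTy>
  ///   %v = llvm.load %p : !llvm.ptr<resultTy>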
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
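  /// In descriptor terms this reads the base_addr field (kAddrPosInBox) of the
  /// lowered box; the GEP result is typed as a pointer to \p ty so the load
  /// below yields a value of that type.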
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against \p maskValue.
  // The final comparison is implemented as `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv = {args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};
} // namespace

namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
    return memSizeFunc;
  TODO(op.getLoc(), "did not find allocation function");
}

// Compute the alloc scale size (constant factors encoded in the array type).
// We do this for arrays without a constant interior and for arrays of
// characters with dynamic length, since those are the only ones that get
// decayed to a pointer to the element type.
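// For example (illustrative only), for a shape such as !fir.array<2x?x5xi32>
// the known extents fold into a constant factor of 2 * 5 = 10; unknown extents
// are multiplied in later by the caller from the shape operands.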
template <typename OP>
static mlir::Value
genAllocationScaleSize(OP op, mlir::Type ity,
                       mlir::ConversionPatternRewriter &rewriter) {
  mlir::Location loc = op.getLoc();
  mlir::Type dataTy = op.getInType();
  mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
  auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
  if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
      (seqTy && fir::characterWithDynamicLen(scalarType))) {
    fir::SequenceType::Extent constSize = 1;
    for (auto extent : seqTy.getShape())
      if (extent != fir::SequenceType::getUnknownExtent())
        constSize *= extent;
    if (constSize != 1) {
      mlir::Value constVal{
          genConstantIndex(loc, ity, rewriter, constSize).getResult()};
      return constVal;
    }
  }
  return nullptr;
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = ::getVoidPtrType(alloc.getContext());
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    if (alloc.hasShapeOperands()) {
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
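/// For example (sketch), `%lb, %ext, %str = fir.box_dims %box, %dim` becomes
/// three GEP+load pairs reading the lower bound, extent, and stride members of
/// dims[%dim] via getDimsFromBox.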
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
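/// A value held as a StringAttr or DenseElementsAttr becomes a single
/// `llvm.mlir.constant`; an ArrayAttr of per-character codes becomes an
/// `llvm.mlir.undef` followed by one `llvm.insertvalue` per character.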
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
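/// For example (sketch), `a == b` becomes
///   fcmp(oeq, a.re, b.re) AND fcmp(oeq, a.im, b.im)
/// and `a /= b` becomes the OR of the two `une` comparisons.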
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    llvm::SmallVector<mlir::Value, 2> rp = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    llvm::SmallVector<mlir::Value, 2> ip = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return mlir::success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return mlir::success();
  }

  inline llvm::APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return mlir::failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return mlir::success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
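/// This is the null-pointer GEP idiom: `getelementptr null, 1` followed by
/// `ptrtoint` yields the element size without requiring a DataLayout here
/// (a sketch of the intent; the actual folding happens later in LLVM).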
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args = {one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type heapTy = heap.getType();
    mlir::Type ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    mlir::Type dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return mlir::success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is an LLVMFuncOp.
  // 2. The first ancestor that is an LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
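  // The alloca is emitted in the entry block of the enclosing LLVMFuncOp (see
  // getFuncForAllocaInsert above) so that boxes created inside loops do not
  // repeatedly grow the stack; the previous insertion point is restored after.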
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return static_cast<bool>(unwrapIfDerived(boxTy));
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8);
      auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created by
  /// lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
    if (!this->options.ignoreMissingTypeDescriptors &&
        !fir::NameUniquer::belongsToModule(
            name, Fortran::semantics::typeInfoBuiltinModule))
      fir::emitFatalError(
          loc, "runtime derived type info descriptor was not generated");
    return rewriter.create<mlir::LLVM::NullOp>(
        loc, ::getVoidPtrType(box.getContext()));
  }

  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values.
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a scalar
  /// string and the zero based string lower bound.
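  /// The shift is a single GEP on the string base address: [0, lowerBound]
  /// when the base points to an LLVM array, or just [lowerBound] when it
  /// already points to the character element type.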
1380 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1381 mlir::Location loc, mlir::Value base, 1382 mlir::Value lowerBound) const { 1383 llvm::SmallVector<mlir::Value> gepOperands; 1384 auto baseType = 1385 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1386 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1387 auto idxTy = this->lowerTy().indexType(); 1388 gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0)); 1389 gepOperands.push_back(lowerBound); 1390 } else { 1391 gepOperands.push_back(lowerBound); 1392 } 1393 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1394 } 1395 1396 /// If the embox is not in a globalOp body, allocate storage for the box; 1397 /// store the value inside and return the generated alloca. Return the input 1398 /// value otherwise. 1399 mlir::Value 1400 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1401 mlir::Location loc, mlir::Value boxValue) const { 1402 auto *thisBlock = rewriter.getInsertionBlock(); 1403 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1404 return boxValue; 1405 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1406 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1407 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1408 return alloca; 1409 } 1410 }; 1411 1412 /// Compute the extent of a triplet slice (lb:ub:step). 1413 static mlir::Value 1414 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1415 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1416 mlir::Value step, mlir::Value zero, mlir::Type type) { 1417 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1418 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1419 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1420 // If the resulting extent is negative (`ub-lb` and `step` have different 1421 // signs), zero must be returned instead. 1422 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1423 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1424 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1425 } 1426 1427 /// Create a generic box on a memory reference. This conversions lowers the 1428 /// abstract box to the appropriate, initialized descriptor. 1429 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1430 using EmboxCommonConversion::EmboxCommonConversion; 1431 1432 mlir::LogicalResult 1433 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1434 mlir::ConversionPatternRewriter &rewriter) const override { 1435 assert(!embox.getShape() && "There should be no dims on this embox op"); 1436 auto [boxTy, dest, eleSize] = 1437 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1438 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1439 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1440 adaptor.getOperands()[0]); 1441 if (isDerivedTypeWithLenParams(boxTy)) { 1442 TODO(embox.getLoc(), 1443 "fir.embox codegen of derived with length parameters"); 1444 return mlir::failure(); 1445 } 1446 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1447 rewriter.replaceOp(embox, result); 1448 return mlir::success(); 1449 } 1450 }; 1451 1452 /// Create a generic box on a memory reference. 
1453 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1454 using EmboxCommonConversion::EmboxCommonConversion; 1455 1456 mlir::LogicalResult 1457 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1458 mlir::ConversionPatternRewriter &rewriter) const override { 1459 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1460 xbox, rewriter, xbox.getOutRank(), 1461 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1462 // Generate the triples in the dims field of the descriptor 1463 mlir::ValueRange operands = adaptor.getOperands(); 1464 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1465 mlir::Value base = operands[0]; 1466 assert(!xbox.shape().empty() && "must have a shape"); 1467 unsigned shapeOffset = xbox.shapeOffset(); 1468 bool hasShift = !xbox.shift().empty(); 1469 unsigned shiftOffset = xbox.shiftOffset(); 1470 bool hasSlice = !xbox.slice().empty(); 1471 unsigned sliceOffset = xbox.sliceOffset(); 1472 mlir::Location loc = xbox.getLoc(); 1473 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1474 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1475 mlir::Value prevPtrOff = one; 1476 mlir::Type eleTy = boxTy.getEleTy(); 1477 const unsigned rank = xbox.getRank(); 1478 llvm::SmallVector<mlir::Value> gepArgs; 1479 unsigned constRows = 0; 1480 mlir::Value ptrOffset = zero; 1481 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1482 assert(memEleTy.isa<fir::SequenceType>()); 1483 auto seqTy = memEleTy.cast<fir::SequenceType>(); 1484 mlir::Type seqEleTy = seqTy.getEleTy(); 1485 // Adjust the element scaling factor if the element is a dependent type. 1486 if (fir::hasDynamicSize(seqEleTy)) { 1487 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) { 1488 assert(xbox.lenParams().size() == 1); 1489 mlir::LLVM::ConstantOp charSize = genConstantIndex( 1490 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); 1491 mlir::Value castedLen = 1492 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); 1493 auto byteOffset = 1494 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen); 1495 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); 1496 } else if (seqEleTy.isa<fir::RecordType>()) { 1497 // prevPtrOff = ; 1498 TODO(loc, "generate call to calculate size of PDT"); 1499 } else { 1500 fir::emitFatalError(loc, "unexpected dynamic type"); 1501 } 1502 } else { 1503 constRows = seqTy.getConstantRows(); 1504 } 1505 1506 const auto hasSubcomp = !xbox.subcomponent().empty(); 1507 const bool hasSubstr = !xbox.substr().empty(); 1508 /// Compute initial element stride that will be use to compute the step in 1509 /// each dimension. 1510 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize); 1511 if (hasSubcomp) { 1512 // We have a subcomponent. The step value needs to be the number of 1513 // bytes per element (which is a derived type). 1514 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1515 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1516 } else if (hasSubstr) { 1517 // We have a substring. The step value needs to be the number of bytes 1518 // per CHARACTER element. 
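// Illustrative example (assuming KIND=2 maps to 16-bit character units): for
// an element type !fir.char<2,5> the constant byte stride computed below is
// 5 * 16 / 8 = 10; with a dynamic LEN, the byte size already held in
// prevPtrOff is reused instead.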
1519 auto charTy = seqEleTy.cast<fir::CharacterType>();
1520 if (fir::hasDynamicSize(charTy)) {
1521 prevDimByteStride = prevPtrOff;
1522 } else {
1523 prevDimByteStride = genConstantIndex(
1524 loc, i64Ty, rewriter,
1525 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
1526 }
1527 }
1528 
1529 // Process the array subspace arguments (shape, shift, etc.), if any,
1530 // translating everything to values in the descriptor wherever the entity
1531 // has a dynamic array dimension.
1532 for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
1533 mlir::Value extent = operands[shapeOffset];
1534 mlir::Value outerExtent = extent;
1535 bool skipNext = false;
1536 if (hasSlice) {
1537 mlir::Value off = operands[sliceOffset];
1538 mlir::Value adj = one;
1539 if (hasShift)
1540 adj = operands[shiftOffset];
1541 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
1542 if (constRows > 0) {
1543 gepArgs.push_back(ao);
1544 } else {
1545 auto dimOff =
1546 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
1547 ptrOffset =
1548 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
1549 }
1550 if (mlir::isa_and_nonnull<fir::UndefOp>(
1551 xbox.slice()[3 * di + 1].getDefiningOp())) {
1552 // This dimension contains a scalar expression in the array slice op.
1553 // The dimension is loop invariant, will be dropped, and will not
1554 // appear in the descriptor.
1555 skipNext = true;
1556 }
1557 }
1558 if (!skipNext) {
1559 // store extent
1560 if (hasSlice)
1561 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
1562 operands[sliceOffset + 1],
1563 operands[sliceOffset + 2], zero, i64Ty);
1564 // Lower bound is normalized to 0 for BIND(C) interoperability.
1565 mlir::Value lb = zero;
1566 const bool isaPointerOrAllocatable =
1567 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
1568 // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and
1569 // denormalized descriptors.
1570 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
1571 lb = one;
1572 // If there is a shifted origin, and no fir.slice, and this is not
1573 // a normalized descriptor, then use the value from the shift op as
1574 // the lower bound.
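// (The select below keeps the lower bound at 1 when the extent is zero,
// matching Fortran's lower bound for zero-sized dimensions.)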
1575 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1576 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1577 lb = operands[shiftOffset]; 1578 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1579 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1580 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1581 lb); 1582 } 1583 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1584 1585 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1586 1587 // store step (scaled by shaped extent) 1588 mlir::Value step = prevDimByteStride; 1589 if (hasSlice) 1590 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1591 operands[sliceOffset + 2]); 1592 dest = insertStride(rewriter, loc, dest, descIdx, step); 1593 ++descIdx; 1594 } 1595 1596 // compute the stride and offset for the next natural dimension 1597 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1598 loc, i64Ty, prevDimByteStride, outerExtent); 1599 if (constRows == 0) 1600 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1601 outerExtent); 1602 else 1603 --constRows; 1604 1605 // increment iterators 1606 ++shapeOffset; 1607 if (hasShift) 1608 ++shiftOffset; 1609 if (hasSlice) 1610 sliceOffset += 3; 1611 } 1612 if (hasSlice || hasSubcomp || hasSubstr) { 1613 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1614 args.append(gepArgs.rbegin(), gepArgs.rend()); 1615 if (hasSubcomp) { 1616 // For each field in the path add the offset to base via the args list. 1617 // In the most general case, some offsets must be computed since 1618 // they are not be known until runtime. 1619 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1620 fir::unwrapPassByRefType(xbox.memref().getType())))) 1621 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1622 args.append(operands.begin() + xbox.subcomponentOffset(), 1623 operands.begin() + xbox.subcomponentOffset() + 1624 xbox.subcomponent().size()); 1625 } 1626 base = 1627 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1628 if (hasSubstr) 1629 base = shiftSubstringBase(rewriter, loc, base, 1630 operands[xbox.substrOffset()]); 1631 } 1632 dest = insertBaseAddress(rewriter, loc, dest, base); 1633 if (isDerivedTypeWithLenParams(boxTy)) 1634 TODO(loc, "fir.embox codegen of derived with length parameters"); 1635 1636 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1637 rewriter.replaceOp(xbox, result); 1638 return mlir::success(); 1639 } 1640 1641 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1642 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1643 /// zero origin lower bound for interoperability with BIND(C). 1644 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1645 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1646 } 1647 }; 1648 1649 /// Create a new box given a box reference. 1650 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1651 using EmboxCommonConversion::EmboxCommonConversion; 1652 1653 mlir::LogicalResult 1654 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1655 mlir::ConversionPatternRewriter &rewriter) const override { 1656 mlir::Location loc = rebox.getLoc(); 1657 mlir::Type idxTy = lowerTy().indexType(); 1658 mlir::Value loweredBox = adaptor.getOperands()[0]; 1659 mlir::ValueRange operands = adaptor.getOperands(); 1660 1661 // Create new descriptor and fill its non-shape related data. 
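// This covers the element size, CFI version, rank, type code, attributes,
// and addendum flag (plus the type descriptor pointer for derived types);
// a CHARACTER length, if needed, is recovered from the input descriptor's
// element size just below.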
1662 llvm::SmallVector<mlir::Value, 2> lenParams; 1663 mlir::Type inputEleTy = getInputEleTy(rebox); 1664 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1665 mlir::Value len = 1666 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1667 if (charTy.getFKind() != 1) { 1668 mlir::Value width = 1669 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1670 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1671 } 1672 lenParams.emplace_back(len); 1673 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1674 if (recTy.getNumLenParams() != 0) 1675 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1676 } 1677 auto [boxTy, dest, eleSize] = 1678 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1679 1680 // Read input extents, strides, and base address 1681 llvm::SmallVector<mlir::Value> inputExtents; 1682 llvm::SmallVector<mlir::Value> inputStrides; 1683 const unsigned inputRank = rebox.getRank(); 1684 for (unsigned i = 0; i < inputRank; ++i) { 1685 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1686 llvm::SmallVector<mlir::Value, 3> dimInfo = 1687 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1688 inputExtents.emplace_back(dimInfo[1]); 1689 inputStrides.emplace_back(dimInfo[2]); 1690 } 1691 1692 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1693 mlir::Value baseAddr = 1694 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1695 1696 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1697 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1698 operands, rewriter); 1699 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1700 operands, rewriter); 1701 } 1702 1703 private: 1704 /// Write resulting shape and base address in descriptor, and replace rebox 1705 /// op. 1706 mlir::LogicalResult 1707 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1708 mlir::ValueRange lbounds, mlir::ValueRange extents, 1709 mlir::ValueRange strides, 1710 mlir::ConversionPatternRewriter &rewriter) const { 1711 mlir::Location loc = rebox.getLoc(); 1712 mlir::Value zero = 1713 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1714 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1715 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1716 mlir::Value extent = std::get<0>(iter.value()); 1717 unsigned dim = iter.index(); 1718 mlir::Value lb = one; 1719 if (!lbounds.empty()) { 1720 lb = lbounds[dim]; 1721 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1722 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1723 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1724 }; 1725 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1726 dest = insertExtent(rewriter, loc, dest, dim, extent); 1727 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1728 } 1729 dest = insertBaseAddress(rewriter, loc, dest, base); 1730 mlir::Value result = 1731 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1732 rewriter.replaceOp(rebox, result); 1733 return mlir::success(); 1734 } 1735 1736 // Apply slice given the base address, extents and strides of the input box. 
1737 mlir::LogicalResult 1738 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1739 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1740 mlir::ValueRange operands, 1741 mlir::ConversionPatternRewriter &rewriter) const { 1742 mlir::Location loc = rebox.getLoc(); 1743 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1744 mlir::Type idxTy = lowerTy().indexType(); 1745 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1746 // Apply subcomponent and substring shift on base address. 1747 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1748 // Cast to inputEleTy* so that a GEP can be used. 1749 mlir::Type inputEleTy = getInputEleTy(rebox); 1750 auto llvmElePtrTy = 1751 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1752 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1753 1754 if (!rebox.subcomponent().empty()) { 1755 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1756 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1757 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1758 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1759 } 1760 if (!rebox.substr().empty()) 1761 base = shiftSubstringBase(rewriter, loc, base, 1762 operands[rebox.substrOffset()]); 1763 } 1764 1765 if (rebox.slice().empty()) 1766 // The array section is of the form array[%component][substring], keep 1767 // the input array extents and strides. 1768 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1769 inputExtents, inputStrides, rewriter); 1770 1771 // Strides from the fir.box are in bytes. 1772 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1773 1774 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1775 // and strides. 1776 llvm::SmallVector<mlir::Value> slicedExtents; 1777 llvm::SmallVector<mlir::Value> slicedStrides; 1778 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1779 const bool sliceHasOrigins = !rebox.shift().empty(); 1780 unsigned sliceOps = rebox.sliceOffset(); 1781 unsigned shiftOps = rebox.shiftOffset(); 1782 auto strideOps = inputStrides.begin(); 1783 const unsigned inputRank = inputStrides.size(); 1784 for (unsigned i = 0; i < inputRank; 1785 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1786 mlir::Value sliceLb = 1787 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1788 mlir::Value inputStride = *strideOps; // already idxTy 1789 // Apply origin shift: base += (lb-shift)*input_stride 1790 mlir::Value sliceOrigin = 1791 sliceHasOrigins 1792 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1793 : one; 1794 mlir::Value diff = 1795 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1796 mlir::Value offset = 1797 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1798 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1799 // Apply upper bound and step if this is a triplet. Otherwise, the 1800 // dimension is dropped and no extents/strides are computed. 
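// Illustrative example: for a triplet slice 2:10:3, the extent computed by
// computeTripletExtent is (10 - 2 + 3) / 3 = 3 and the new stride is
// 3 * input_stride; a degenerate triplet such as 9:2:3 yields a negative
// value that is clamped to 0.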
1801 mlir::Value upper = operands[sliceOps + 1];
1802 const bool isTripletSlice =
1803 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
1804 if (isTripletSlice) {
1805 mlir::Value step =
1806 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
1807 // extent = (ub - lb + step) / step
1808 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
1809 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
1810 sliceUb, step, zero, idxTy);
1811 slicedExtents.emplace_back(extent);
1812 // stride = step * input_stride
1813 mlir::Value stride =
1814 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
1815 slicedStrides.emplace_back(stride);
1816 }
1817 }
1818 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
1819 slicedExtents, slicedStrides, rewriter);
1820 }
1821 
1822 /// Apply a new shape to the data described by a box given the base address,
1823 /// extents and strides of the box.
1824 mlir::LogicalResult
1825 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
1826 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
1827 mlir::ValueRange operands,
1828 mlir::ConversionPatternRewriter &rewriter) const {
1829 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
1830 operands.begin() + rebox.shiftOffset() +
1831 rebox.shift().size()};
1832 if (rebox.shape().empty()) {
1833 // Only setting new lower bounds.
1834 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
1835 inputStrides, rewriter);
1836 }
1837 
1838 mlir::Location loc = rebox.getLoc();
1839 // Strides from the fir.box are in bytes.
1840 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
1841 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
1842 
1843 llvm::SmallVector<mlir::Value> newStrides;
1844 llvm::SmallVector<mlir::Value> newExtents;
1845 mlir::Type idxTy = lowerTy().indexType();
1846 // First stride from input box is kept. The rest is assumed contiguous
1847 // (it is not possible to reshape otherwise). If the input is scalar,
1848 // which may be OK if all new extents are ones, the stride does not
1849 // matter, so use one.
1850 mlir::Value stride = inputStrides.empty()
1851 ? genConstantIndex(loc, idxTy, rewriter, 1)
1852 : inputStrides[0];
1853 for (unsigned i = 0; i < rebox.shape().size(); ++i) {
1854 mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
1855 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
1856 newExtents.emplace_back(extent);
1857 newStrides.emplace_back(stride);
1858 // nextStride = extent * stride;
1859 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
1860 }
1861 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
1862 rewriter);
1863 }
1864 
1865 /// Return scalar element type of the input box.
1866 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
1867 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
1868 if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
1869 return seqTy.getEleTy();
1870 return ty;
1871 }
1872 };
1873 
1874 /// Lower `fir.emboxproc` operation. Creates a procedure box.
1875 /// TODO: Part of supporting Fortran 2003 procedure pointers.
1876 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1877 using FIROpConversion::FIROpConversion; 1878 1879 mlir::LogicalResult 1880 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1881 mlir::ConversionPatternRewriter &rewriter) const override { 1882 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1883 return mlir::failure(); 1884 } 1885 }; 1886 1887 // Code shared between insert_value and extract_value Ops. 1888 struct ValueOpCommon { 1889 // Translate the arguments pertaining to any multidimensional array to 1890 // row-major order for LLVM-IR. 1891 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1892 mlir::Type ty) { 1893 assert(ty && "type is null"); 1894 const auto end = attrs.size(); 1895 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1896 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1897 const auto dim = getDimension(seq); 1898 if (dim > 1) { 1899 auto ub = std::min(i + dim, end); 1900 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1901 i += dim - 1; 1902 } 1903 ty = getArrayElementType(seq); 1904 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1905 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1906 } else { 1907 llvm_unreachable("index into invalid type"); 1908 } 1909 } 1910 } 1911 1912 static llvm::SmallVector<mlir::Attribute> 1913 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1914 mlir::ArrayAttr arrAttr) { 1915 llvm::SmallVector<mlir::Attribute> attrs; 1916 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1917 if (i->isa<mlir::IntegerAttr>()) { 1918 attrs.push_back(*i); 1919 } else { 1920 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1921 ++i; 1922 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1923 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1924 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1925 } 1926 } 1927 return attrs; 1928 } 1929 1930 private: 1931 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1932 unsigned result = 1; 1933 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1934 eleTy; 1935 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1936 ++result; 1937 return result; 1938 } 1939 1940 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1941 auto eleTy = ty.getElementType(); 1942 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1943 eleTy = arrTy.getElementType(); 1944 return eleTy; 1945 } 1946 }; 1947 1948 namespace { 1949 /// Extract a subobject value from an ssa-value of aggregate type 1950 struct ExtractValueOpConversion 1951 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1952 public ValueOpCommon { 1953 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1954 1955 mlir::LogicalResult 1956 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1957 mlir::ConversionPatternRewriter &rewriter) const override { 1958 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1959 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1960 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1961 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1962 extractVal, ty, adaptor.getOperands()[0], position); 1963 return mlir::success(); 1964 } 1965 }; 1966 1967 /// InsertValue is the generalized instruction for the composition of new 1968 /// aggregate type values. 
1969 struct InsertValueOpConversion 1970 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1971 public ValueOpCommon { 1972 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1973 1974 mlir::LogicalResult 1975 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1976 mlir::ConversionPatternRewriter &rewriter) const override { 1977 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1978 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1979 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1980 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1981 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1982 position); 1983 return mlir::success(); 1984 } 1985 }; 1986 1987 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1988 struct InsertOnRangeOpConversion 1989 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1990 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1991 1992 // Increments an array of subscripts in a row major fasion. 1993 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1994 llvm::SmallVector<uint64_t> &subscripts) const { 1995 for (size_t i = dims.size(); i > 0; --i) { 1996 if (++subscripts[i - 1] < dims[i - 1]) { 1997 return; 1998 } 1999 subscripts[i - 1] = 0; 2000 } 2001 } 2002 2003 mlir::LogicalResult 2004 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2005 mlir::ConversionPatternRewriter &rewriter) const override { 2006 2007 llvm::SmallVector<uint64_t> dims; 2008 auto type = adaptor.getOperands()[0].getType(); 2009 2010 // Iteratively extract the array dimensions from the type. 2011 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2012 dims.push_back(t.getNumElements()); 2013 type = t.getElementType(); 2014 } 2015 2016 llvm::SmallVector<std::uint64_t> lBounds; 2017 llvm::SmallVector<std::uint64_t> uBounds; 2018 2019 // Unzip the upper and lower bound and convert to a row major format. 2020 mlir::DenseIntElementsAttr coor = range.getCoor(); 2021 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2022 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2023 uBounds.push_back(*i++); 2024 lBounds.push_back(*i); 2025 } 2026 2027 auto &subscripts = lBounds; 2028 auto loc = range.getLoc(); 2029 mlir::Value lastOp = adaptor.getOperands()[0]; 2030 mlir::Value insertVal = adaptor.getOperands()[1]; 2031 2032 auto i64Ty = rewriter.getI64Type(); 2033 while (subscripts != uBounds) { 2034 // Convert uint64_t's to Attribute's. 2035 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2036 for (const auto &subscript : subscripts) 2037 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2038 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2039 loc, ty, lastOp, insertVal, 2040 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2041 2042 incrementSubscripts(dims, subscripts); 2043 } 2044 2045 // Convert uint64_t's to Attribute's. 
2046 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2047 for (const auto &subscript : subscripts) 2048 subscriptAttrs.push_back( 2049 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2050 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2051 2052 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2053 range, ty, lastOp, insertVal, 2054 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2055 2056 return mlir::success(); 2057 } 2058 }; 2059 } // namespace 2060 2061 namespace { 2062 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2063 /// shifted etc. array. 2064 /// (See the static restriction on coordinate_of.) array_coor determines the 2065 /// coordinate (location) of a specific element. 2066 struct XArrayCoorOpConversion 2067 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2068 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2069 2070 mlir::LogicalResult 2071 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2072 mlir::ConversionPatternRewriter &rewriter) const override { 2073 auto loc = coor.getLoc(); 2074 mlir::ValueRange operands = adaptor.getOperands(); 2075 unsigned rank = coor.getRank(); 2076 assert(coor.indices().size() == rank); 2077 assert(coor.shape().empty() || coor.shape().size() == rank); 2078 assert(coor.shift().empty() || coor.shift().size() == rank); 2079 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2080 mlir::Type idxTy = lowerTy().indexType(); 2081 unsigned indexOffset = coor.indicesOffset(); 2082 unsigned shapeOffset = coor.shapeOffset(); 2083 unsigned shiftOffset = coor.shiftOffset(); 2084 unsigned sliceOffset = coor.sliceOffset(); 2085 auto sliceOps = coor.slice().begin(); 2086 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2087 mlir::Value prevExt = one; 2088 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2089 mlir::Value offset = zero; 2090 const bool isShifted = !coor.shift().empty(); 2091 const bool isSliced = !coor.slice().empty(); 2092 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2093 2094 // For each dimension of the array, generate the offset calculation. 2095 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset, 2096 ++shiftOffset, sliceOffset += 3, sliceOps += 3) { 2097 mlir::Value index = 2098 integerCast(loc, rewriter, idxTy, operands[indexOffset]); 2099 mlir::Value lb = 2100 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset]) 2101 : one; 2102 mlir::Value step = one; 2103 bool normalSlice = isSliced; 2104 // Compute zero based index in dimension i of the element, applying 2105 // potential triplets and lower bounds. 2106 if (isSliced) { 2107 mlir::Value originalUb = *(sliceOps + 1); 2108 normalSlice = 2109 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp()); 2110 if (normalSlice) 2111 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]); 2112 } 2113 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2114 mlir::Value diff = 2115 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2116 if (normalSlice) { 2117 mlir::Value sliceLb = 2118 integerCast(loc, rewriter, idxTy, operands[sliceOffset]); 2119 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2120 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2121 } 2122 // Update the offset given the stride and the zero based index `diff` 2123 // that was just computed. 
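// In effect, offset += diff * stride_i, where stride_i is a byte stride read
// from the descriptor for boxed bases, and a running product of extents
// (an element count) for contiguous, unboxed bases.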
2124 if (baseIsBoxed) {
2125 // Use stride in bytes from the descriptor.
2126 mlir::Value stride =
2127 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
2128 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
2129 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2130 } else {
2131 // Use stride computed at last iteration.
2132 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
2133 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2134 // Compute next stride assuming contiguity of the base array
2135 // (in element number).
2136 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]);
2137 prevExt =
2138 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2139 }
2140 }
2141 
2142 // Add computed offset to the base address.
2143 if (baseIsBoxed) {
2144 // Working with byte offsets. The base address is read from the fir.box
2145 // and needs to be cast to i8* to do the pointer arithmetic.
2146 mlir::Type baseTy =
2147 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
2148 mlir::Value base =
2149 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
2150 mlir::Type voidPtrTy = getVoidPtrType();
2151 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2152 llvm::SmallVector<mlir::Value> args{offset};
2153 auto addr =
2154 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2155 if (coor.subcomponent().empty()) {
2156 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
2157 return mlir::success();
2158 }
2159 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2160 args.clear();
2161 args.push_back(zero);
2162 if (!coor.lenParams().empty()) {
2163 // If type parameters are present, then we don't want to use a GEPOp
2164 // as below, as the LLVM struct type cannot be statically defined.
2165 TODO(loc, "derived type with type parameters");
2166 }
2167 // TODO: array offset subcomponents must be converted to LLVM's
2168 // row-major layout here.
2169 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2170 args.push_back(operands[i]);
2171 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
2172 args);
2173 return mlir::success();
2174 }
2175 
2176 // The array was not boxed, so it must be contiguous. offset is therefore an
2177 // element offset and the base type is kept in the GEP unless the element
2178 // type size is itself dynamic.
2179 mlir::Value base;
2180 if (coor.subcomponent().empty()) {
2181 // No subcomponent.
2182 if (!coor.lenParams().empty()) {
2183 // Type parameters. Adjust element size explicitly.
2184 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2185 assert(eleTy && "result must be a reference-like type");
2186 if (fir::characterWithDynamicLen(eleTy)) {
2187 assert(coor.lenParams().size() == 1);
2188 auto length = integerCast(loc, rewriter, idxTy,
2189 operands[coor.lenParamsOffset()]);
2190 offset =
2191 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, length);
2192 
2193 } else {
2194 TODO(loc, "compute size of derived type with type parameters");
2195 }
2196 }
2197 // Cast the base address to a pointer to T.
2198 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
2199 adaptor.getOperands()[0]);
2200 } else {
2201 // Operand #0 must have a pointer type. For subcomponent slicing, we
2202 // want to cast away the array type and have a plain struct type.
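// Rough sketch of the recast performed below (typed-pointer LLVM dialect):
//   !llvm.ptr<array<10 x struct<(f32, f32)>>>  -->  !llvm.ptr<struct<(f32, f32)>>
// so that the remaining GEP indices can address the record fields directly.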
2203 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2204 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2205 assert(ptrTy && "expected pointer type"); 2206 mlir::Type eleTy = ptrTy.getElementType(); 2207 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2208 eleTy = arrTy.getElementType(); 2209 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2210 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2211 adaptor.getOperands()[0]); 2212 } 2213 llvm::SmallVector<mlir::Value> args = {offset}; 2214 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2215 args.push_back(operands[i]); 2216 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2217 return mlir::success(); 2218 } 2219 }; 2220 } // namespace 2221 2222 /// Convert to (memory) reference to a reference to a subobject. 2223 /// The coordinate_of op is a Swiss army knife operation that can be used on 2224 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2225 /// With unboxed arrays, there is the restriction that the array have a static 2226 /// shape in all but the last column. 2227 struct CoordinateOpConversion 2228 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2229 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2230 2231 mlir::LogicalResult 2232 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2233 mlir::ConversionPatternRewriter &rewriter) const override { 2234 mlir::ValueRange operands = adaptor.getOperands(); 2235 2236 mlir::Location loc = coor.getLoc(); 2237 mlir::Value base = operands[0]; 2238 mlir::Type baseObjectTy = coor.getBaseType(); 2239 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2240 assert(objectTy && "fir.coordinate_of expects a reference type"); 2241 2242 // Complex type - basically, extract the real or imaginary part 2243 if (fir::isa_complex(objectTy)) { 2244 mlir::LLVM::ConstantOp c0 = 2245 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2246 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2247 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2248 rewriter.replaceOp(coor, gep); 2249 return mlir::success(); 2250 } 2251 2252 // Boxed type - get the base pointer from the box 2253 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2254 return doRewriteBox(coor, ty, operands, loc, rewriter); 2255 2256 // Reference, pointer or a heap type 2257 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2258 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2259 2260 return rewriter.notifyMatchFailure( 2261 coor, "fir.coordinate_of base operand has unsupported type"); 2262 } 2263 2264 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2265 return fir::hasDynamicSize(ty) 2266 ? 
op.getDefiningOp() 2267 ->getAttrOfType<mlir::IntegerAttr>("field") 2268 .getInt() 2269 : getIntValue(op); 2270 } 2271 2272 static int64_t getIntValue(mlir::Value val) { 2273 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2274 mlir::Operation *defop = val.getDefiningOp(); 2275 2276 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2277 return constOp.value(); 2278 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2279 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2280 return attr.getValue().getSExtValue(); 2281 fir::emitFatalError(val.getLoc(), "must be a constant"); 2282 } 2283 2284 static bool hasSubDimensions(mlir::Type type) { 2285 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2286 } 2287 2288 /// Check whether this form of `!fir.coordinate_of` is supported. These 2289 /// additional checks are required, because we are not yet able to convert 2290 /// all valid forms of `!fir.coordinate_of`. 2291 /// TODO: Either implement the unsupported cases or extend the verifier 2292 /// in FIROps.cpp instead. 2293 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2294 const std::size_t numOfCoors = coors.size(); 2295 std::size_t i = 0; 2296 bool subEle = false; 2297 bool ptrEle = false; 2298 for (; i < numOfCoors; ++i) { 2299 mlir::Value nxtOpnd = coors[i]; 2300 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2301 subEle = true; 2302 i += arrTy.getDimension() - 1; 2303 type = arrTy.getEleTy(); 2304 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2305 subEle = true; 2306 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2307 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2308 subEle = true; 2309 type = tupTy.getType(getIntValue(nxtOpnd)); 2310 } else { 2311 ptrEle = true; 2312 } 2313 } 2314 if (ptrEle) 2315 return (!subEle) && (numOfCoors == 1); 2316 return subEle && (i >= numOfCoors); 2317 } 2318 2319 /// Walk the abstract memory layout and determine if the path traverses any 2320 /// array types with unknown shape. Return true iff all the array types have a 2321 /// constant shape along the path. 2322 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2323 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2324 mlir::Value nxtOpnd = coors[i]; 2325 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2326 if (fir::sequenceWithNonConstantShape(arrTy)) 2327 return false; 2328 i += arrTy.getDimension() - 1; 2329 type = arrTy.getEleTy(); 2330 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2331 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2332 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2333 type = strTy.getType(getIntValue(nxtOpnd)); 2334 } else { 2335 return true; 2336 } 2337 } 2338 return true; 2339 } 2340 2341 private: 2342 mlir::LogicalResult 2343 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2344 mlir::Location loc, 2345 mlir::ConversionPatternRewriter &rewriter) const { 2346 mlir::Type boxObjTy = coor.getBaseType(); 2347 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2348 2349 mlir::Value boxBaseAddr = operands[0]; 2350 2351 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2352 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>>
2353 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2354 // %addr = coordinate_of %box, %lenp
2355 if (coor.getNumOperands() == 2) {
2356 mlir::Operation *coordinateDef =
2357 (*coor.getCoor().begin()).getDefiningOp();
2358 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2359 TODO(loc,
2360 "fir.coordinate_of - fir.len_param_index is not supported yet");
2361 }
2362 
2363 // 2. GENERAL CASE:
2364 // 2.1. (`fir.array`)
2365 // %box = ... : !fir.box<!fir.array<?xU>>
2366 // %idx = ... : index
2367 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2368 // 2.2 (`fir.derived`)
2369 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2370 // %idx = ... : i32
2371 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2372 // 2.3 (`fir.derived` inside `fir.array`)
2373 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
2374 // %idx1 = ... : index   %idx2 = ... : i32
2375 // %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2376 // 2.4. TODO: Either document or disable any other case that the following
2377 // implementation might convert.
2378 mlir::LLVM::ConstantOp c0 =
2379 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
2380 mlir::Value resultAddr =
2381 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()),
2382 boxBaseAddr, rewriter);
2383 // Component Type
2384 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy);
2385 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext());
2386 
2387 for (unsigned i = 1, last = operands.size(); i < last; ++i) {
2388 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) {
2389 if (i != 1)
2390 TODO(loc, "fir.array nested inside other array and/or derived type");
2391 // Applies byte strides from the box. Ignore lower bound from box
2392 // since fir.coordinate_of indexes are zero-based. Lowering takes care
2393 // of lower bound aspects. This both accounts for dynamically sized
2394 // types and non-contiguous arrays.
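// The byte offset computed below is off = sum over the dimensions of
// coordinate_d * byte_stride_d, applied through an i8* GEP on the base
// address read from the descriptor.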
2395 auto idxTy = lowerTy().indexType(); 2396 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2397 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2398 index < lastIndex; ++index) { 2399 mlir::Value stride = 2400 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2401 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2402 operands[index], stride); 2403 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2404 } 2405 auto voidPtrBase = 2406 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2407 llvm::SmallVector<mlir::Value> args = {off}; 2408 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2409 voidPtrBase, args); 2410 i += arrTy.getDimension() - 1; 2411 cpnTy = arrTy.getEleTy(); 2412 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2413 auto recRefTy = 2414 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2415 mlir::Value nxtOpnd = operands[i]; 2416 auto memObj = 2417 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2418 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2419 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2420 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2421 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2422 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2423 args); 2424 resultAddr = 2425 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2426 } else { 2427 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2428 } 2429 } 2430 2431 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2432 return mlir::success(); 2433 } 2434 2435 mlir::LogicalResult 2436 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2437 mlir::ValueRange operands, mlir::Location loc, 2438 mlir::ConversionPatternRewriter &rewriter) const { 2439 mlir::Type baseObjectTy = coor.getBaseType(); 2440 2441 // Component Type 2442 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2443 bool hasSubdimension = hasSubDimensions(cpnTy); 2444 bool columnIsDeferred = !hasSubdimension; 2445 2446 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2447 TODO(loc, "unsupported combination of coordinate operands"); 2448 2449 const bool hasKnownShape = 2450 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2451 2452 // If only the column is `?`, then we can simply place the column value in 2453 // the 0-th GEP position. 
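// For example (sketch), with a base of type !fir.ref<!fir.array<2x3x?xi32>>
// only the last (column) extent is unknown; its index can be carried as the
// leading GEP index while the constant dimensions keep their static layout.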
2454 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2455 if (!hasKnownShape) { 2456 const unsigned sz = arrTy.getDimension(); 2457 if (arraysHaveKnownShape(arrTy.getEleTy(), 2458 operands.drop_front(1 + sz))) { 2459 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2460 bool allConst = true; 2461 for (unsigned i = 0; i < sz - 1; ++i) { 2462 if (shape[i] < 0) { 2463 allConst = false; 2464 break; 2465 } 2466 } 2467 if (allConst) 2468 columnIsDeferred = true; 2469 } 2470 } 2471 } 2472 2473 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2474 return mlir::emitError( 2475 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2476 2477 if (hasKnownShape || columnIsDeferred) { 2478 llvm::SmallVector<mlir::Value> offs; 2479 if (hasKnownShape && hasSubdimension) { 2480 mlir::LLVM::ConstantOp c0 = 2481 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2482 offs.push_back(c0); 2483 } 2484 llvm::Optional<int> dims; 2485 llvm::SmallVector<mlir::Value> arrIdx; 2486 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2487 mlir::Value nxtOpnd = operands[i]; 2488 2489 if (!cpnTy) 2490 return mlir::emitError(loc, "invalid coordinate/check failed"); 2491 2492 // check if the i-th coordinate relates to an array 2493 if (dims) { 2494 arrIdx.push_back(nxtOpnd); 2495 int dimsLeft = *dims; 2496 if (dimsLeft > 1) { 2497 dims = dimsLeft - 1; 2498 continue; 2499 } 2500 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2501 // append array range in reverse (FIR arrays are column-major) 2502 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2503 arrIdx.clear(); 2504 dims.reset(); 2505 continue; 2506 } 2507 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2508 int d = arrTy.getDimension() - 1; 2509 if (d > 0) { 2510 dims = d; 2511 arrIdx.push_back(nxtOpnd); 2512 continue; 2513 } 2514 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2515 offs.push_back(nxtOpnd); 2516 continue; 2517 } 2518 2519 // check if the i-th coordinate relates to a field 2520 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2521 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2522 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2523 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2524 else 2525 cpnTy = nullptr; 2526 2527 offs.push_back(nxtOpnd); 2528 } 2529 if (dims) 2530 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2531 mlir::Value base = operands[0]; 2532 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2533 rewriter.replaceOp(coor, retval); 2534 return mlir::success(); 2535 } 2536 2537 return mlir::emitError( 2538 loc, "fir.coordinate_of base operand has unsupported type"); 2539 } 2540 }; 2541 2542 /// Convert `fir.field_index`. The conversion depends on whether the size of 2543 /// the record is static or dynamic. 2544 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2545 using FIROpConversion::FIROpConversion; 2546 2547 // NB: most field references should be resolved by this point 2548 mlir::LogicalResult 2549 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2550 mlir::ConversionPatternRewriter &rewriter) const override { 2551 auto recTy = field.getOnType().cast<fir::RecordType>(); 2552 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2553 2554 if (!fir::hasDynamicSize(recTy)) { 2555 // Derived type has compile-time constant layout. Return index of the 2556 // component type in the parent type (to be used in GEP). 
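// For instance, a fir.field_index naming the second component of its parent
// type simply lowers to the constant index 1 here.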
2557 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2558 field.getLoc(), rewriter, index)});
2559 return mlir::success();
2560 }
2561 
2562 // Derived type does not have a compile-time constant layout. Call the
2563 // compiler-generated function to determine the byte offset of the field at
2564 // runtime. This returns a non-constant.
2565 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2566 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2567 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2568 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
2569 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2570 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2571 field, lowerTy().offsetType(), adaptor.getOperands(),
2572 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2573 return mlir::success();
2574 }
2575 
2576 // Reconstruct the name of the compiler-generated method that calculates the
2577 // field offset.
2578 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2579 llvm::StringRef field) {
2580 return recTy.getName().str() + "P." + field.str() + ".offset";
2581 }
2582 };
2583 
2584 /// Convert `fir.end`
2585 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2586 using FIROpConversion::FIROpConversion;
2587 
2588 mlir::LogicalResult
2589 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2590 mlir::ConversionPatternRewriter &rewriter) const override {
2591 TODO(firEnd.getLoc(), "fir.end codegen");
2592 return mlir::failure();
2593 }
2594 };
2595 
2596 /// Lower `fir.gentypedesc` to a global constant.
2597 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2598 using FIROpConversion::FIROpConversion;
2599 
2600 mlir::LogicalResult
2601 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2602 mlir::ConversionPatternRewriter &rewriter) const override {
2603 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2604 return mlir::failure();
2605 }
2606 };
2607 
2608 /// Lower `fir.has_value` operation to `llvm.return` operation.
2609 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2610 using FIROpConversion::FIROpConversion;
2611 
2612 mlir::LogicalResult
2613 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2614 mlir::ConversionPatternRewriter &rewriter) const override {
2615 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
2616 adaptor.getOperands());
2617 return mlir::success();
2618 }
2619 };
2620 
2621 /// Lower `fir.global` operation to `llvm.global` operation.
2622 /// `fir.insert_on_range` operations are replaced with a constant dense
2623 /// attribute if they are applied on the full range.
2624 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2625 using FIROpConversion::FIROpConversion; 2626 2627 mlir::LogicalResult 2628 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2629 mlir::ConversionPatternRewriter &rewriter) const override { 2630 auto tyAttr = convertType(global.getType()); 2631 if (global.getType().isa<fir::BoxType>()) 2632 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2633 auto loc = global.getLoc(); 2634 mlir::Attribute initAttr; 2635 if (global.getInitVal()) 2636 initAttr = global.getInitVal().getValue(); 2637 auto linkage = convertLinkage(global.getLinkName()); 2638 auto isConst = global.getConstant().hasValue(); 2639 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2640 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2641 auto &gr = g.getInitializerRegion(); 2642 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2643 if (!gr.empty()) { 2644 // Replace insert_on_range with a constant dense attribute if the 2645 // initialization is on the full range. 2646 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2647 for (auto insertOp : insertOnRangeOps) { 2648 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2649 auto seqTyAttr = convertType(insertOp.getType()); 2650 auto *op = insertOp.getVal().getDefiningOp(); 2651 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2652 if (!constant) { 2653 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2654 if (!convertOp) 2655 continue; 2656 constant = mlir::cast<mlir::arith::ConstantOp>( 2657 convertOp.getValue().getDefiningOp()); 2658 } 2659 mlir::Type vecType = mlir::VectorType::get( 2660 insertOp.getType().getShape(), constant.getType()); 2661 auto denseAttr = mlir::DenseElementsAttr::get( 2662 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2663 rewriter.setInsertionPointAfter(insertOp); 2664 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2665 insertOp, seqTyAttr, denseAttr); 2666 } 2667 } 2668 } 2669 rewriter.eraseOp(global); 2670 return mlir::success(); 2671 } 2672 2673 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2674 fir::SequenceType seqTy) const { 2675 auto extents = seqTy.getShape(); 2676 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2677 return false; 2678 auto cur_index = indexes.value_begin<int64_t>(); 2679 for (unsigned i = 0; i < indexes.size(); i += 2) { 2680 if (*(cur_index++) != 0) 2681 return false; 2682 if (*(cur_index++) != extents[i / 2] - 1) 2683 return false; 2684 } 2685 return true; 2686 } 2687 2688 // TODO: String comparaison should be avoided. Replace linkName with an 2689 // enumeration. 
2690 mlir::LLVM::Linkage
2691 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const {
2692 if (optLinkage.hasValue()) {
2693 auto name = optLinkage.getValue();
2694 if (name == "internal")
2695 return mlir::LLVM::Linkage::Internal;
2696 if (name == "linkonce")
2697 return mlir::LLVM::Linkage::Linkonce;
2698 if (name == "linkonce_odr")
2699 return mlir::LLVM::Linkage::LinkonceODR;
2700 if (name == "common")
2701 return mlir::LLVM::Linkage::Common;
2702 if (name == "weak")
2703 return mlir::LLVM::Linkage::Weak;
2704 }
2705 return mlir::LLVM::Linkage::External;
2706 }
2707 };
2708 
2709 /// `fir.load` --> `llvm.load`
2710 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
2711 using FIROpConversion::FIROpConversion;
2712 
2713 mlir::LogicalResult
2714 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor,
2715 mlir::ConversionPatternRewriter &rewriter) const override {
2716 // fir.box is a special case because it is considered an SSA value in
2717 // FIR, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box>
2718 // and fir.box end up being the same LLVM type, and loading a
2719 // fir.ref<fir.box> is actually a no-op in LLVM.
2720 if (load.getType().isa<fir::BoxType>()) {
2721 rewriter.replaceOp(load, adaptor.getOperands()[0]);
2722 } else {
2723 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>(
2724 load, convertType(load.getType()), adaptor.getOperands(),
2725 load->getAttrs());
2726 }
2727 return mlir::success();
2728 }
2729 };
2730 
2731 /// Lower `fir.no_reassoc` to LLVM IR dialect.
2732 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
2733 /// math flags?
2734 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
2735 using FIROpConversion::FIROpConversion;
2736 
2737 mlir::LogicalResult
2738 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
2739 mlir::ConversionPatternRewriter &rewriter) const override {
2740 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
2741 return mlir::success();
2742 }
2743 };
2744 
2745 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest,
2746 llvm::Optional<mlir::ValueRange> destOps,
2747 mlir::ConversionPatternRewriter &rewriter,
2748 mlir::Block *newBlock) {
2749 if (destOps.hasValue())
2750 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(),
2751 newBlock, mlir::ValueRange());
2752 else
2753 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock);
2754 }
2755 
2756 template <typename A, typename B>
2757 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps,
2758 mlir::ConversionPatternRewriter &rewriter) {
2759 if (destOps.hasValue())
2760 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(),
2761 dest);
2762 else
2763 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest);
2764 }
2765 
2766 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp,
2767 mlir::Block *dest,
2768 llvm::Optional<mlir::ValueRange> destOps,
2769 mlir::ConversionPatternRewriter &rewriter) {
2770 auto *thisBlock = rewriter.getInsertionBlock();
2771 auto *newBlock = createBlock(rewriter, dest);
2772 rewriter.setInsertionPointToEnd(thisBlock);
2773 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock);
2774 rewriter.setInsertionPointToEnd(newBlock);
2775 }
2776 
2777 /// Conversion of `fir.select_case`
2778 ///
2779 /// The `fir.select_case` operation is converted to an if-then-else ladder.
2780 /// Depending on the case condition type, one or several comparisons and
2781 /// conditional branches may be generated.
2782 ///
2783 /// A point value case such as `case(4)`, a lower bound case such as
2784 /// `case(5:)`, or an upper bound case such as `case(:3)` is converted to a
2785 /// simple comparison between the selector value and the constant value in the
2786 /// case. The block associated with the case condition is then executed if
2787 /// the comparison succeeds; otherwise, control branches to the next block with
2788 /// the comparison for the next case condition.
2789 ///
2790 /// A closed interval case condition such as `case(7:10)` is converted with a
2791 /// first comparison and conditional branch for the lower bound. If
2792 /// successful, it branches to a second block with the comparison for the
2793 /// upper bound of the same case condition.
2794 ///
2795 /// TODO: lowering of CHARACTER type cases is not handled yet.
2796 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
2797 using FIROpConversion::FIROpConversion;
2798
2799 mlir::LogicalResult
2800 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
2801 mlir::ConversionPatternRewriter &rewriter) const override {
2802 unsigned conds = caseOp.getNumConditions();
2803 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
2804 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
2805 auto ty = caseOp.getSelector().getType();
2806 if (ty.isa<fir::CharacterType>()) {
2807 TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
2808 return mlir::failure();
2809 }
2810 mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
2811 auto loc = caseOp.getLoc();
2812 for (unsigned t = 0; t != conds; ++t) {
2813 mlir::Block *dest = caseOp.getSuccessor(t);
2814 llvm::Optional<mlir::ValueRange> destOps =
2815 caseOp.getSuccessorOperands(adaptor.getOperands(), t);
2816 llvm::Optional<mlir::ValueRange> cmpOps =
2817 *caseOp.getCompareOperands(adaptor.getOperands(), t);
2818 mlir::Value caseArg = *(cmpOps.value().begin());
2819 mlir::Attribute attr = cases[t];
2820 if (attr.isa<fir::PointIntervalAttr>()) {
2821 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2822 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
2823 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2824 continue;
2825 }
2826 if (attr.isa<fir::LowerBoundAttr>()) {
2827 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2828 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2829 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2830 continue;
2831 }
2832 if (attr.isa<fir::UpperBoundAttr>()) {
2833 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2834 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
2835 genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
2836 continue;
2837 }
2838 if (attr.isa<fir::ClosedIntervalAttr>()) {
2839 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
2840 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
2841 auto *thisBlock = rewriter.getInsertionBlock();
2842 auto *newBlock1 = createBlock(rewriter, dest);
2843 auto *newBlock2 = createBlock(rewriter, dest);
2844 rewriter.setInsertionPointToEnd(thisBlock);
2845 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
2846 rewriter.setInsertionPointToEnd(newBlock1);
2847 mlir::Value caseArg0 = *(cmpOps.value().begin() + 1);
2848 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
2849 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
2850 genCondBrOp(loc, cmp0,
dest, destOps, rewriter, newBlock2);
2851 rewriter.setInsertionPointToEnd(newBlock2);
2852 continue;
2853 }
2854 assert(attr.isa<mlir::UnitAttr>());
2855 assert((t + 1 == conds) && "unit must be last");
2856 genBrOp(caseOp, dest, destOps, rewriter);
2857 }
2858 return mlir::success();
2859 }
2860 };
2861
2862 template <typename OP>
2863 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
2864 typename OP::Adaptor adaptor,
2865 mlir::ConversionPatternRewriter &rewriter) {
2866 unsigned conds = select.getNumConditions();
2867 auto cases = select.getCases().getValue();
2868 mlir::Value selector = adaptor.getSelector();
2869 auto loc = select.getLoc();
2870 assert(conds > 0 && "select must have cases");
2871
2872 llvm::SmallVector<mlir::Block *> destinations;
2873 llvm::SmallVector<mlir::ValueRange> destinationsOperands;
2874 mlir::Block *defaultDestination;
2875 mlir::ValueRange defaultOperands;
2876 llvm::SmallVector<int32_t> caseValues;
2877
2878 for (unsigned t = 0; t != conds; ++t) {
2879 mlir::Block *dest = select.getSuccessor(t);
2880 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
2881 const mlir::Attribute &attr = cases[t];
2882 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
2883 destinations.push_back(dest);
2884 destinationsOperands.push_back(destOps.hasValue() ? *destOps
2885 : mlir::ValueRange{});
2886 caseValues.push_back(intAttr.getInt());
2887 continue;
2888 }
2889 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
2890 assert((t + 1 == conds) && "unit must be last");
2891 defaultDestination = dest;
2892 defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{};
2893 }
2894
2895 // LLVM::SwitchOp takes an i32 type for the selector.
2896 if (select.getSelector().getType() != rewriter.getI32Type())
2897 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
2898 selector);
2899
2900 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
2901 select, selector,
2902 /*defaultDestination=*/defaultDestination,
2903 /*defaultOperands=*/defaultOperands,
2904 /*caseValues=*/caseValues,
2905 /*caseDestinations=*/destinations,
2906 /*caseOperands=*/destinationsOperands,
2907 /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
2908 }
2909
2910 /// Conversion of `fir.select` to an `llvm.switch` operation.
2911 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
2912 using FIROpConversion::FIROpConversion;
2913
2914 mlir::LogicalResult
2915 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
2916 mlir::ConversionPatternRewriter &rewriter) const override {
2917 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
2918 return mlir::success();
2919 }
2920 };
2921
2922 /// Conversion of `fir.select_rank` to an `llvm.switch` operation.
2923 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
2924 using FIROpConversion::FIROpConversion;
2925
2926 mlir::LogicalResult
2927 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
2928 mlir::ConversionPatternRewriter &rewriter) const override {
2929 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
2930 return mlir::success();
2931 }
2932 };
2933
2934 /// `fir.select_type` should have been rewritten before codegen; reaching this
2934 /// pattern is an error.
2935 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2936 using FIROpConversion::FIROpConversion; 2937 2938 mlir::LogicalResult 2939 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2940 mlir::ConversionPatternRewriter &rewriter) const override { 2941 mlir::emitError(select.getLoc(), 2942 "fir.select_type should have already been converted"); 2943 return mlir::failure(); 2944 } 2945 }; 2946 2947 /// `fir.store` --> `llvm.store` 2948 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2949 using FIROpConversion::FIROpConversion; 2950 2951 mlir::LogicalResult 2952 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2953 mlir::ConversionPatternRewriter &rewriter) const override { 2954 if (store.getValue().getType().isa<fir::BoxType>()) { 2955 // fir.box value is actually in memory, load it first before storing it. 2956 mlir::Location loc = store.getLoc(); 2957 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2958 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2959 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2960 adaptor.getOperands()[0]); 2961 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2962 store, val, adaptor.getOperands()[1]); 2963 } else { 2964 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2965 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2966 } 2967 return mlir::success(); 2968 } 2969 }; 2970 2971 namespace { 2972 2973 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2974 /// the character buffer and one for the buffer length. 2975 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2976 using FIROpConversion::FIROpConversion; 2977 2978 mlir::LogicalResult 2979 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2980 mlir::ConversionPatternRewriter &rewriter) const override { 2981 auto *ctx = unboxchar.getContext(); 2982 2983 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2984 mlir::Value tuple = adaptor.getOperands()[0]; 2985 mlir::Type tupleTy = tuple.getType(); 2986 2987 mlir::Location loc = unboxchar.getLoc(); 2988 mlir::Value ptrToBuffer = 2989 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2990 2991 mlir::LLVM::ExtractValueOp len = 2992 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2993 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2994 2995 rewriter.replaceOp(unboxchar, 2996 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2997 return mlir::success(); 2998 } 2999 }; 3000 3001 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 3002 /// components. 3003 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
3004 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
3005 using FIROpConversion::FIROpConversion;
3006
3007 mlir::LogicalResult
3008 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
3009 mlir::ConversionPatternRewriter &rewriter) const override {
3010 TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
3011 return mlir::failure();
3012 }
3013 };
3014
3015 /// `fir.undefined` --> `llvm.mlir.undef`
3016 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
3017 using FIROpConversion::FIROpConversion;
3018
3019 mlir::LogicalResult
3020 matchAndRewrite(fir::UndefOp undef, OpAdaptor,
3021 mlir::ConversionPatternRewriter &rewriter) const override {
3022 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
3023 undef, convertType(undef.getType()));
3024 return mlir::success();
3025 }
3026 };
3027
3028 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
3029 using FIROpConversion::FIROpConversion;
3030
3031 mlir::LogicalResult
3032 matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
3033 mlir::ConversionPatternRewriter &rewriter) const override {
3034 mlir::Type ty = convertType(zero.getType());
3035 if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
3036 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
3037 } else if (ty.isa<mlir::IntegerType>()) {
3038 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
3039 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
3040 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
3041 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
3042 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
3043 } else {
3044 // TODO: create ConstantAggregateZero for FIR aggregate/array types.
3045 return rewriter.notifyMatchFailure(
3046 zero,
3047 "conversion of fir.zero with aggregate type not implemented yet");
3048 }
3049 return mlir::success();
3050 }
3051 };
3052
3053 /// `fir.unreachable` --> `llvm.unreachable`
3054 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
3055 using FIROpConversion::FIROpConversion;
3056
3057 mlir::LogicalResult
3058 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
3059 mlir::ConversionPatternRewriter &rewriter) const override {
3060 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
3061 return mlir::success();
3062 }
3063 };
3064
3065 /// `fir.is_present` (with the incoming pointer lowered to `%arg0`) -->
3066 /// ```
3067 /// %0 = llvm.mlir.constant(0 : i64)
3068 /// %1 = llvm.ptrtoint %arg0
3069 /// %2 = llvm.icmp "ne" %1, %0 : i64
3070 /// ```
3071 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
3072 using FIROpConversion::FIROpConversion;
3073
3074 mlir::LogicalResult
3075 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
3076 mlir::ConversionPatternRewriter &rewriter) const override {
3077 mlir::Type idxTy = lowerTy().indexType();
3078 mlir::Location loc = isPresent.getLoc();
3079 auto ptr = adaptor.getOperands()[0];
3080
3081 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
3082 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
3083 assert(!structTy.isOpaque() && !structTy.getBody().empty());
3084
3085 mlir::Type ty = structTy.getBody()[0];
3086 mlir::MLIRContext *ctx = isPresent.getContext();
3087 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3088 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
3089 }
3090 mlir::LLVM::ConstantOp c0 =
3091 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
3092 auto addr =
rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3093 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3094 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3095 3096 return mlir::success(); 3097 } 3098 }; 3099 3100 /// Create value signaling an absent optional argument in a call, e.g. 3101 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3102 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3103 using FIROpConversion::FIROpConversion; 3104 3105 mlir::LogicalResult 3106 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3107 mlir::ConversionPatternRewriter &rewriter) const override { 3108 mlir::Type ty = convertType(absent.getType()); 3109 mlir::Location loc = absent.getLoc(); 3110 3111 if (absent.getType().isa<fir::BoxCharType>()) { 3112 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3113 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3114 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3115 auto nullField = 3116 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3117 mlir::MLIRContext *ctx = absent.getContext(); 3118 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3119 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3120 absent, ty, undefStruct, nullField, c0); 3121 } else { 3122 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3123 } 3124 return mlir::success(); 3125 } 3126 }; 3127 3128 // 3129 // Primitive operations on Complex types 3130 // 3131 3132 /// Generate inline code for complex addition/subtraction 3133 template <typename LLVMOP, typename OPTY> 3134 static mlir::LLVM::InsertValueOp 3135 complexSum(OPTY sumop, mlir::ValueRange opnds, 3136 mlir::ConversionPatternRewriter &rewriter, 3137 fir::LLVMTypeConverter &lowering) { 3138 mlir::Value a = opnds[0]; 3139 mlir::Value b = opnds[1]; 3140 auto loc = sumop.getLoc(); 3141 auto ctx = sumop.getContext(); 3142 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3143 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3144 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3145 mlir::Type ty = lowering.convertType(sumop.getType()); 3146 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3147 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3148 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3149 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3150 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3151 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3152 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3153 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3154 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3155 } 3156 } // namespace 3157 3158 namespace { 3159 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3160 using FIROpConversion::FIROpConversion; 3161 3162 mlir::LogicalResult 3163 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3164 mlir::ConversionPatternRewriter &rewriter) const override { 3165 // given: (x + iy) + (x' + iy') 3166 // result: (x + x') + i(y + y') 3167 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3168 rewriter, lowerTy()); 3169 rewriter.replaceOp(addc, r.getResult()); 3170 return mlir::success(); 3171 } 3172 }; 3173 3174 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3175 using 
FIROpConversion::FIROpConversion; 3176 3177 mlir::LogicalResult 3178 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3179 mlir::ConversionPatternRewriter &rewriter) const override { 3180 // given: (x + iy) - (x' + iy') 3181 // result: (x - x') + i(y - y') 3182 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3183 rewriter, lowerTy()); 3184 rewriter.replaceOp(subc, r.getResult()); 3185 return mlir::success(); 3186 } 3187 }; 3188 3189 /// Inlined complex multiply 3190 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3191 using FIROpConversion::FIROpConversion; 3192 3193 mlir::LogicalResult 3194 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3195 mlir::ConversionPatternRewriter &rewriter) const override { 3196 // TODO: Can we use a call to __muldc3 ? 3197 // given: (x + iy) * (x' + iy') 3198 // result: (xx'-yy')+i(xy'+yx') 3199 mlir::Value a = adaptor.getOperands()[0]; 3200 mlir::Value b = adaptor.getOperands()[1]; 3201 auto loc = mulc.getLoc(); 3202 auto *ctx = mulc.getContext(); 3203 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3204 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3205 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3206 mlir::Type ty = convertType(mulc.getType()); 3207 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3208 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3209 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3210 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3211 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3212 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3213 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3214 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3215 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3216 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3217 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3218 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3219 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3220 rewriter.replaceOp(mulc, r0.getResult()); 3221 return mlir::success(); 3222 } 3223 }; 3224 3225 /// Inlined complex division 3226 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3227 using FIROpConversion::FIROpConversion; 3228 3229 mlir::LogicalResult 3230 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3231 mlir::ConversionPatternRewriter &rewriter) const override { 3232 // TODO: Can we use a call to __divdc3 instead? 3233 // Just generate inline code for now. 
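// As a quick sanity check of the formula spelled out just below (illustrative
// arithmetic, not generated code): (1 + 2i) / (3 + 4i) has d = 9 + 16 = 25,
// real part (3 + 8)/25 = 0.44, and imaginary part (6 - 4)/25 = 0.08.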
3234 // given: (x + iy) / (x' + iy')
3235 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y'
3236 mlir::Value a = adaptor.getOperands()[0];
3237 mlir::Value b = adaptor.getOperands()[1];
3238 auto loc = divc.getLoc();
3239 auto *ctx = divc.getContext();
3240 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
3241 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
3242 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
3243 mlir::Type ty = convertType(divc.getType());
3244 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
3245 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
3246 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
3247 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
3248 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
3249 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
3250 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
3251 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
3252 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
3253 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
3254 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
3255 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
3256 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
3257 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
3258 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
3259 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
3260 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
3261 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
3262 rewriter.replaceOp(divc, r0.getResult());
3263 return mlir::success();
3264 }
3265 };
3266
3267 /// Inlined complex negation
3268 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
3269 using FIROpConversion::FIROpConversion;
3270
3271 mlir::LogicalResult
3272 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
3273 mlir::ConversionPatternRewriter &rewriter) const override {
3274 // given: -(x + iy)
3275 // result: -x - iy
3276 auto *ctxt = neg.getContext();
3277 auto eleTy = convertType(getComplexEleTy(neg.getType()));
3278 auto ty = convertType(neg.getType());
3279 auto loc = neg.getLoc();
3280 mlir::Value o0 = adaptor.getOperands()[0];
3281 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
3282 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
3283 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
3284 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
3285 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
3286 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
3287 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
3288 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1);
3289 return mlir::success();
3290 }
3291 };
3292
3293 /// Conversion pattern for operations that must be dead. The information in
3294 /// these operations is used by other operations. At this point they should
3295 /// not have any uses left.
3296 /// These operations are normally dead after the pre-codegen pass.
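/// For example (illustrative): after the pre-codegen rewrite pass, a shape op
/// such as
///   %shape = fir.shape %extent : (index) -> !fir.shape<1>
/// that only fed a fir.embox has had its extent folded into the operands of
/// the rewritten embox (XEmboxOp), so %shape is left without uses and can
/// simply be erased here.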
3297 template <typename FromOp> 3298 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3299 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3300 const fir::FIRToLLVMPassOptions &options) 3301 : FIROpConversion<FromOp>(lowering, options) {} 3302 using OpAdaptor = typename FromOp::Adaptor; 3303 3304 mlir::LogicalResult 3305 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3306 mlir::ConversionPatternRewriter &rewriter) const final { 3307 if (!op->getUses().empty()) 3308 return rewriter.notifyMatchFailure(op, "op must be dead"); 3309 rewriter.eraseOp(op); 3310 return mlir::success(); 3311 } 3312 }; 3313 3314 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3315 using MustBeDeadConversion::MustBeDeadConversion; 3316 }; 3317 3318 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3319 using MustBeDeadConversion::MustBeDeadConversion; 3320 }; 3321 3322 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3323 using MustBeDeadConversion::MustBeDeadConversion; 3324 }; 3325 3326 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3327 using MustBeDeadConversion::MustBeDeadConversion; 3328 }; 3329 3330 } // namespace 3331 3332 namespace { 3333 /// Convert FIR dialect to LLVM dialect 3334 /// 3335 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3336 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3337 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3338 public: 3339 FIRToLLVMLowering() = default; 3340 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3341 mlir::ModuleOp getModule() { return getOperation(); } 3342 3343 void runOnOperation() override final { 3344 auto mod = getModule(); 3345 if (!forcedTargetTriple.empty()) 3346 fir::setTargetTriple(mod, forcedTargetTriple); 3347 3348 auto *context = getModule().getContext(); 3349 fir::LLVMTypeConverter typeConverter{getModule()}; 3350 mlir::RewritePatternSet pattern(context); 3351 pattern.insert< 3352 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3353 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3354 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3355 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3356 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3357 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3358 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3359 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3360 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3361 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3362 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3363 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3364 InsertValueOpConversion, IsPresentOpConversion, 3365 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3366 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3367 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3368 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3369 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3370 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3371 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3372 XEmboxOpConversion, XReboxOpConversion, 
ZeroOpConversion>(typeConverter,
3373 options);
3374 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
3375 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
3376 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
3377 pattern);
3378 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
3379 pattern);
3380 // Convert math-like dialect operations, which can be produced
3381 // when late math lowering mode is used, into the LLVM dialect.
3382 mlir::populateMathToLLVMConversionPatterns(typeConverter, pattern);
3383 mlir::populateMathToLibmConversionPatterns(pattern, /*benefit=*/0);
3384 mlir::ConversionTarget target{*context};
3385 target.addLegalDialect<mlir::LLVM::LLVMDialect>();
3386 // The OpenMP dialect is legal for operations without regions; for those
3387 // that contain regions, it is legal only if the regions contain nothing but
3388 // the LLVM dialect. Add the OpenMP dialect as a legal dialect for the
3389 // conversion and legalize the conversion of OpenMP operations without regions.
3390 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
3391 target.addLegalDialect<mlir::omp::OpenMPDialect>();
3392
3393 // The module op must remain legal for a full conversion to apply.
3394 target.addLegalOp<mlir::ModuleOp>();
3395
3396 // Apply the patterns.
3397 if (mlir::failed(mlir::applyFullConversion(getModule(), target,
3398 std::move(pattern)))) {
3399 signalPassFailure();
3400 }
3401 }
3402
3403 private:
3404 fir::FIRToLLVMPassOptions options;
3405 };
3406
3407 /// Lower from the LLVM IR dialect to LLVM IR proper and print the module.
3408 struct LLVMIRLoweringPass
3409 : public mlir::PassWrapper<LLVMIRLoweringPass,
3410 mlir::OperationPass<mlir::ModuleOp>> {
3411 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)
3412
3413 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
3414 : output{output}, printer{p} {}
3415
3416 mlir::ModuleOp getModule() { return getOperation(); }
3417
3418 void runOnOperation() override final {
3419 auto *ctx = getModule().getContext();
3420 auto optName = getModule().getName();
3421 llvm::LLVMContext llvmCtx;
3422 if (auto llvmModule = mlir::translateModuleToLLVMIR(
3423 getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
3424 printer(*llvmModule, output);
3425 return;
3426 }
3427
3428 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
3429 signalPassFailure();
3430 }
3431
3432 private:
3433 llvm::raw_ostream &output;
3434 fir::LLVMIRLoweringPrinter printer;
3435 };
3436
3437 } // namespace
3438
3439 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
3440 return std::make_unique<FIRToLLVMLowering>();
3441 }
3442
3443 std::unique_ptr<mlir::Pass>
3444 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
3445 return std::make_unique<FIRToLLVMLowering>(options);
3446 }
3447
3448 std::unique_ptr<mlir::Pass>
3449 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
3450 fir::LLVMIRLoweringPrinter printer) {
3451 return std::make_unique<LLVMIRLoweringPass>(output, printer);
3452 }
3453
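// Example usage (illustrative sketch; the pass manager setup below is not part
// of this file's API): a driver typically schedules the FIR-to-LLVM conversion
// at the end of its pipeline, along the lines of
//
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     /* report the failure */;
//
// and then runs the pass created by fir::createLLVMDialectToLLVMPass() to
// translate the resulting LLVM dialect module and print it with the supplied
// printer.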