1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 27 #include "mlir/IR/BuiltinTypes.h" 28 #include "mlir/IR/Matchers.h" 29 #include "mlir/Pass/Pass.h" 30 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 31 #include "llvm/ADT/ArrayRef.h" 32 33 #define DEBUG_TYPE "flang-codegen" 34 35 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 36 #include "TypeConverter.h" 37 38 // TODO: This should really be recovered from the specified target. 39 static constexpr unsigned defaultAlign = 8; 40 41 /// `fir.box` attribute values as defined for CFI_attribute_t in 42 /// flang/ISO_Fortran_binding.h. 
43 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 44 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 45 46 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 47 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 48 } 49 50 static mlir::LLVM::ConstantOp 51 genConstantIndex(mlir::Location loc, mlir::Type ity, 52 mlir::ConversionPatternRewriter &rewriter, 53 std::int64_t offset) { 54 auto cattr = rewriter.getI64IntegerAttr(offset); 55 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 56 } 57 58 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 59 mlir::Block *insertBefore) { 60 assert(insertBefore && "expected valid insertion block"); 61 return rewriter.createBlock(insertBefore->getParent(), 62 mlir::Region::iterator(insertBefore)); 63 } 64 65 namespace { 66 /// FIR conversion pattern template 67 template <typename FromOp> 68 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 69 public: 70 explicit FIROpConversion(fir::LLVMTypeConverter &lowering, 71 const fir::FIRToLLVMPassOptions &options) 72 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {} 73 74 protected: 75 mlir::Type convertType(mlir::Type ty) const { 76 return lowerTy().convertType(ty); 77 } 78 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 79 80 mlir::Type getVoidPtrType() const { 81 return mlir::LLVM::LLVMPointerType::get( 82 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 83 } 84 85 mlir::LLVM::ConstantOp 86 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 87 int value) const { 88 mlir::Type i32Ty = rewriter.getI32Type(); 89 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 90 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 91 } 92 93 mlir::LLVM::ConstantOp 94 genConstantOffset(mlir::Location loc, 95 mlir::ConversionPatternRewriter &rewriter, 96 int offset) const { 97 mlir::Type ity = lowerTy().offsetType(); 98 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset); 99 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 100 } 101 102 /// Perform an extension or truncation as needed on an integer value. Lowering 103 /// to the specific target may involve some sign-extending or truncation of 104 /// values, particularly to fit them from abstract box types to the 105 /// appropriate reified structures. 106 mlir::Value integerCast(mlir::Location loc, 107 mlir::ConversionPatternRewriter &rewriter, 108 mlir::Type ty, mlir::Value val) const { 109 auto valTy = val.getType(); 110 // If the value was not yet lowered, lower its type so that it can 111 // be used in getPrimitiveTypeSizeInBits. 112 if (!valTy.isa<mlir::IntegerType>()) 113 valTy = convertType(valTy); 114 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 115 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 116 if (toSize < fromSize) 117 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 118 if (toSize > fromSize) 119 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 120 return val; 121 } 122 123 /// Construct code sequence to extract the specifc value from a `fir.box`. 
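/// Schematically (an illustrative sketch, not verbatim compiler output, with
/// `%box`, `T`, and `<boxValue>` as placeholders), the field at position
/// `boxValue` of the lowered descriptor struct is read with a GEP and a load:
///
///   %c0  = llvm.mlir.constant(0 : i32) : i32
///   %pos = llvm.mlir.constant(<boxValue> : i32) : i32
///   %p   = llvm.getelementptr %box[%c0, %pos] : (!llvm.ptr<...>, i32, i32) -> !llvm.ptr<T>
///   %val = llvm.load %p : !llvm.ptr<T>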
124 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 125 mlir::Type resultTy, 126 mlir::ConversionPatternRewriter &rewriter, 127 unsigned boxValue) const { 128 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 129 mlir::LLVM::ConstantOp cValuePos = 130 genConstantOffset(loc, rewriter, boxValue); 131 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 132 auto p = rewriter.create<mlir::LLVM::GEPOp>( 133 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 134 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 135 } 136 137 /// Method to construct code sequence to get the triple for dimension `dim` 138 /// from a box. 139 llvm::SmallVector<mlir::Value, 3> 140 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, 141 mlir::Value box, mlir::Value dim, 142 mlir::ConversionPatternRewriter &rewriter) const { 143 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 144 mlir::LLVM::ConstantOp cDims = 145 genConstantOffset(loc, rewriter, kDimsPosInBox); 146 mlir::LLVM::LoadOp l0 = 147 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 148 mlir::LLVM::LoadOp l1 = 149 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 150 mlir::LLVM::LoadOp l2 = 151 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 152 return {l0.getResult(), l1.getResult(), l2.getResult()}; 153 } 154 155 mlir::LLVM::LoadOp 156 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 157 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 158 mlir::Type ty, 159 mlir::ConversionPatternRewriter &rewriter) const { 160 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 161 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 162 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 163 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 164 } 165 166 mlir::Value 167 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 168 mlir::ConversionPatternRewriter &rewriter) const { 169 auto idxTy = lowerTy().indexType(); 170 auto c0 = genConstantOffset(loc, rewriter, 0); 171 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 172 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 173 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 174 rewriter); 175 } 176 177 /// Read base address from a fir.box. Returned address has type ty. 
178 mlir::Value 179 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 180 mlir::ConversionPatternRewriter &rewriter) const { 181 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 182 mlir::LLVM::ConstantOp cAddr = 183 genConstantOffset(loc, rewriter, kAddrPosInBox); 184 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 185 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 186 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 187 } 188 189 mlir::Value 190 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 191 mlir::ConversionPatternRewriter &rewriter) const { 192 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 193 mlir::LLVM::ConstantOp cElemLen = 194 genConstantOffset(loc, rewriter, kElemLenPosInBox); 195 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 196 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 197 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 198 } 199 200 // Get the element type given an LLVM type that is of the form 201 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 202 static mlir::Type getBoxEleTy(mlir::Type type, 203 llvm::ArrayRef<unsigned> indexes) { 204 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 205 type = t.getElementType(); 206 for (auto i : indexes) { 207 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 208 assert(!t.isOpaque() && i < t.getBody().size()); 209 type = t.getBody()[i]; 210 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 211 type = t.getElementType(); 212 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 213 type = t.getElementType(); 214 } else { 215 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 216 "request for invalid box element type"); 217 } 218 } 219 return type; 220 } 221 222 // Return LLVM type of the base address given the LLVM type 223 // of the related descriptor (lowered fir.box type). 224 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 225 return getBoxEleTy(type, {kAddrPosInBox}); 226 } 227 228 // Load the attribute from the \p box and perform a check against \p maskValue 229 // The final comparison is implemented as `(attribute & maskValue) != 0`. 230 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 231 mlir::ConversionPatternRewriter &rewriter, 232 unsigned maskValue) const { 233 mlir::Type attrTy = rewriter.getI32Type(); 234 mlir::Value attribute = 235 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 236 mlir::LLVM::ConstantOp attrMask = 237 genConstantOffset(loc, rewriter, maskValue); 238 auto maskRes = 239 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 240 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 241 return rewriter.create<mlir::LLVM::ICmpOp>( 242 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 243 } 244 245 template <typename... ARGS> 246 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 247 mlir::ConversionPatternRewriter &rewriter, 248 mlir::Value base, ARGS... 
args) const {
    llvm::SmallVector<mlir::Value> cv = {args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};
} // namespace

namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
    return memSizeFunc;
  TODO(op.getLoc(), "did not find allocation function");
}

// Compute the allocation scale size (constant factors encoded in the array
// type). We do this for arrays without a constant interior and for arrays of
// characters with dynamic length, since those are the only ones that get
// decayed to a pointer to the element type.
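//
// For instance (a hypothetical illustration), an allocation of type
// !fir.array<8x?x16xi32> has no constant interior, but the constant factor
// 8 * 16 = 128 is encoded in its type; that factor is produced here so the
// dynamic extent supplied by the shape operands only needs to be multiplied
// by 128.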
313 template <typename OP> 314 static mlir::Value 315 genAllocationScaleSize(OP op, mlir::Type ity, 316 mlir::ConversionPatternRewriter &rewriter) { 317 mlir::Location loc = op.getLoc(); 318 mlir::Type dataTy = op.getInType(); 319 mlir::Type scalarType = fir::unwrapSequenceType(dataTy); 320 auto seqTy = dataTy.dyn_cast<fir::SequenceType>(); 321 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) || 322 (seqTy && fir::characterWithDynamicLen(scalarType))) { 323 fir::SequenceType::Extent constSize = 1; 324 for (auto extent : seqTy.getShape()) 325 if (extent != fir::SequenceType::getUnknownExtent()) 326 constSize *= extent; 327 if (constSize != 1) { 328 mlir::Value constVal{ 329 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 330 return constVal; 331 } 332 } 333 return nullptr; 334 } 335 336 namespace { 337 /// convert to LLVM IR dialect `alloca` 338 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 339 using FIROpConversion::FIROpConversion; 340 341 mlir::LogicalResult 342 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 343 mlir::ConversionPatternRewriter &rewriter) const override { 344 mlir::ValueRange operands = adaptor.getOperands(); 345 auto loc = alloc.getLoc(); 346 mlir::Type ity = lowerTy().indexType(); 347 unsigned i = 0; 348 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 349 mlir::Type ty = convertType(alloc.getType()); 350 mlir::Type resultTy = ty; 351 if (alloc.hasLenParams()) { 352 unsigned end = alloc.numLenParams(); 353 llvm::SmallVector<mlir::Value> lenParams; 354 for (; i < end; ++i) 355 lenParams.push_back(operands[i]); 356 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 357 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 358 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 359 chrTy.getContext(), chrTy.getFKind()); 360 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 361 assert(end == 1); 362 size = integerCast(loc, rewriter, ity, lenParams[0]); 363 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 364 mlir::LLVM::LLVMFuncOp memSizeFn = 365 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 366 if (!memSizeFn) 367 emitError(loc, "did not find allocation function"); 368 mlir::NamedAttribute attr = rewriter.getNamedAttr( 369 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 370 auto call = rewriter.create<mlir::LLVM::CallOp>( 371 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 372 size = call.getResult(0); 373 ty = ::getVoidPtrType(alloc.getContext()); 374 } else { 375 return emitError(loc, "unexpected type ") 376 << scalarType << " with type parameters"; 377 } 378 } 379 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter)) 380 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 381 if (alloc.hasShapeOperands()) { 382 unsigned end = operands.size(); 383 for (; i < end; ++i) 384 size = rewriter.create<mlir::LLVM::MulOp>( 385 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 386 } 387 if (ty == resultTy) { 388 // Do not emit the bitcast if ty and resultTy are the same. 
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// This results in a triple set of GEPs and loads.
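///
/// Schematically (an illustrative sketch, not verbatim output), for dimension
/// index %dim the three results are read from the dims entry of the
/// descriptor, in CFI_dim_t order (lower bound, extent, byte stride):
///
///   %lb  = llvm.load (getelementptr %box[0, kDimsPosInBox, %dim, 0])
///   %ext = llvm.load (getelementptr %box[0, kDimsPosInBox, %dim, 1])
///   %str = llvm.load (getelementptr %box[0, kDimsPosInBox, %dim, 2])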
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
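///
/// Like `fir.box_isalloc`, the check reads the descriptor's attribute field
/// and masks it. Roughly (an illustrative sketch):
///
///   %attr = ... load of the attribute field of %box ...
///   %and  = llvm.and %attr, %c_CFI_attribute_pointer : i32
///   %res  = llvm.icmp "ne" %and, %c0 : i32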
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank
/// from the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
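///
/// For example (illustrative only, not verbatim output), a KIND=1 literal
/// 'AB' carried as an ArrayAttr is materialized element by element:
///
///   %0 = llvm.mlir.undef : !llvm.array<2 x i8>
///   %1 = llvm.mlir.constant(65 : i8) : i8      // 'A'
///   %2 = llvm.insertvalue %1, %0[0] : !llvm.array<2 x i8>
///   %3 = llvm.mlir.constant(66 : i8) : i8      // 'B'
///   %4 = llvm.insertvalue %3, %2[1] : !llvm.array<2 x i8>
///
/// whereas a literal carried as a StringAttr becomes a single
/// llvm.mlir.constant of the converted type.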
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // Convert each character to a precise bitsize.
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values.
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component
/// only.
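///
/// Schematically, `a .EQ. b` becomes two scalar compares that are then
/// combined (an illustrative sketch; %a_re, %a_im, etc. are the extracted
/// real and imaginary parts):
///
///   %r   = llvm.fcmp "oeq" %a_re, %b_re
///   %i   = llvm.fcmp "oeq" %a_im, %b_im
///   %res = llvm.and %r, %i
///
/// and `.NE.` combines two "une" compares with llvm.or.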
672 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 673 using FIROpConversion::FIROpConversion; 674 675 mlir::LogicalResult 676 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 677 mlir::ConversionPatternRewriter &rewriter) const override { 678 mlir::ValueRange operands = adaptor.getOperands(); 679 mlir::MLIRContext *ctxt = cmp.getContext(); 680 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 681 mlir::Type resTy = convertType(cmp.getType()); 682 mlir::Location loc = cmp.getLoc(); 683 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 684 llvm::SmallVector<mlir::Value, 2> rp = { 685 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 686 pos0), 687 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 688 pos0)}; 689 auto rcp = 690 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 691 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 692 llvm::SmallVector<mlir::Value, 2> ip = { 693 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 694 pos1), 695 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 696 pos1)}; 697 auto icp = 698 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 699 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp}; 700 switch (cmp.getPredicate()) { 701 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 702 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 703 break; 704 case mlir::arith::CmpFPredicate::UNE: // .NE. 705 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 706 break; 707 default: 708 rewriter.replaceOp(cmp, rcp.getResult()); 709 break; 710 } 711 return mlir::success(); 712 } 713 }; 714 715 /// Lower complex constants 716 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 717 using FIROpConversion::FIROpConversion; 718 719 mlir::LogicalResult 720 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 721 mlir::ConversionPatternRewriter &rewriter) const override { 722 mlir::Location loc = conc.getLoc(); 723 mlir::MLIRContext *ctx = conc.getContext(); 724 mlir::Type ty = convertType(conc.getType()); 725 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 726 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 727 auto realPart = 728 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 729 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 730 auto imPart = 731 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 732 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 733 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 734 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 735 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 736 loc, ty, undef, realPart, realIndex); 737 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 738 imPart, imIndex); 739 return mlir::success(); 740 } 741 742 inline llvm::APFloat getValue(mlir::Attribute attr) const { 743 return attr.cast<fir::RealAttr>().getValue(); 744 } 745 }; 746 747 /// convert value of from-type to value of to-type 748 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 749 using FIROpConversion::FIROpConversion; 750 751 static bool isFloatingPointTy(mlir::Type ty) { 752 return ty.isa<mlir::FloatType>(); 753 } 754 755 mlir::LogicalResult 756 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 757 
mlir::ConversionPatternRewriter &rewriter) const override { 758 auto fromFirTy = convert.getValue().getType(); 759 auto toFirTy = convert.getRes().getType(); 760 auto fromTy = convertType(fromFirTy); 761 auto toTy = convertType(toFirTy); 762 mlir::Value op0 = adaptor.getOperands()[0]; 763 if (fromTy == toTy) { 764 rewriter.replaceOp(convert, op0); 765 return mlir::success(); 766 } 767 auto loc = convert.getLoc(); 768 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 769 unsigned toBits, mlir::Type toTy) -> mlir::Value { 770 if (fromBits == toBits) { 771 // TODO: Converting between two floating-point representations with the 772 // same bitwidth is not allowed for now. 773 mlir::emitError(loc, 774 "cannot implicitly convert between two floating-point " 775 "representations of the same bitwidth"); 776 return {}; 777 } 778 if (fromBits > toBits) 779 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 780 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 781 }; 782 // Complex to complex conversion. 783 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 784 // Special case: handle the conversion of a complex such that both the 785 // real and imaginary parts are converted together. 786 auto zero = mlir::ArrayAttr::get(convert.getContext(), 787 rewriter.getI32IntegerAttr(0)); 788 auto one = mlir::ArrayAttr::get(convert.getContext(), 789 rewriter.getI32IntegerAttr(1)); 790 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 791 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 792 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 793 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 794 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 795 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 796 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 797 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 798 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 799 auto i1 = 800 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 801 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 802 ic, one); 803 return mlir::success(); 804 } 805 806 // Follow UNIX F77 convention for logicals: 807 // 1. underlying integer is not zero => logical is .TRUE. 808 // 2. logical is .TRUE. => set underlying integer to 1. 809 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 810 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 811 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 812 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 813 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 814 return mlir::success(); 815 } 816 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 817 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 818 return mlir::success(); 819 } 820 821 // Floating point to floating point conversion. 
822 if (isFloatingPointTy(fromTy)) { 823 if (isFloatingPointTy(toTy)) { 824 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 825 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 826 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 827 rewriter.replaceOp(convert, v); 828 return mlir::success(); 829 } 830 if (toTy.isa<mlir::IntegerType>()) { 831 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 832 return mlir::success(); 833 } 834 } else if (fromTy.isa<mlir::IntegerType>()) { 835 // Integer to integer conversion. 836 if (toTy.isa<mlir::IntegerType>()) { 837 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 838 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 839 assert(fromBits != toBits); 840 if (fromBits > toBits) { 841 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 842 return mlir::success(); 843 } 844 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 845 return mlir::success(); 846 } 847 // Integer to floating point conversion. 848 if (isFloatingPointTy(toTy)) { 849 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 850 return mlir::success(); 851 } 852 // Integer to pointer conversion. 853 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 854 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 855 return mlir::success(); 856 } 857 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 858 // Pointer to integer conversion. 859 if (toTy.isa<mlir::IntegerType>()) { 860 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 861 return mlir::success(); 862 } 863 // Pointer to pointer conversion. 864 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 865 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 866 return mlir::success(); 867 } 868 } 869 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 870 } 871 }; 872 873 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 874 /// table. 875 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 876 using FIROpConversion::FIROpConversion; 877 878 mlir::LogicalResult 879 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 880 mlir::ConversionPatternRewriter &rewriter) const override { 881 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 882 return mlir::failure(); 883 } 884 }; 885 886 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 887 /// derived type. 888 struct DispatchTableOpConversion 889 : public FIROpConversion<fir::DispatchTableOp> { 890 using FIROpConversion::FIROpConversion; 891 892 mlir::LogicalResult 893 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 894 mlir::ConversionPatternRewriter &rewriter) const override { 895 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 896 return mlir::failure(); 897 } 898 }; 899 900 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 901 /// method-name to a function. 902 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 903 using FIROpConversion::FIROpConversion; 904 905 mlir::LogicalResult 906 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 907 mlir::ConversionPatternRewriter &rewriter) const override { 908 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 909 return mlir::failure(); 910 } 911 }; 912 913 /// Lower `fir.global_len` operation. 
914 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 915 using FIROpConversion::FIROpConversion; 916 917 mlir::LogicalResult 918 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 919 mlir::ConversionPatternRewriter &rewriter) const override { 920 TODO(globalLen.getLoc(), "fir.global_len codegen"); 921 return mlir::failure(); 922 } 923 }; 924 925 /// Lower fir.len_param_index 926 struct LenParamIndexOpConversion 927 : public FIROpConversion<fir::LenParamIndexOp> { 928 using FIROpConversion::FIROpConversion; 929 930 // FIXME: this should be specialized by the runtime target 931 mlir::LogicalResult 932 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 933 mlir::ConversionPatternRewriter &rewriter) const override { 934 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 935 } 936 }; 937 938 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 939 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 940 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 941 /// element is the length of the character buffer (`#n`). 942 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 943 using FIROpConversion::FIROpConversion; 944 945 mlir::LogicalResult 946 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 947 mlir::ConversionPatternRewriter &rewriter) const override { 948 mlir::ValueRange operands = adaptor.getOperands(); 949 auto *ctx = emboxChar.getContext(); 950 951 mlir::Value charBuffer = operands[0]; 952 mlir::Value charBufferLen = operands[1]; 953 954 mlir::Location loc = emboxChar.getLoc(); 955 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 956 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 957 958 mlir::Type lenTy = 959 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 960 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 961 962 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 963 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 964 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 965 loc, llvmStructTy, llvmStruct, charBuffer, c0); 966 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 967 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 968 969 return mlir::success(); 970 } 971 }; 972 } // namespace 973 974 /// Return the LLVMFuncOp corresponding to the standard malloc call. 975 static mlir::LLVM::LLVMFuncOp 976 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 977 auto module = op->getParentOfType<mlir::ModuleOp>(); 978 if (mlir::LLVM::LLVMFuncOp mallocFunc = 979 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 980 return mallocFunc; 981 mlir::OpBuilder moduleBuilder( 982 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 983 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 984 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 985 rewriter.getUnknownLoc(), "malloc", 986 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 987 indexType, 988 /*isVarArg=*/false)); 989 } 990 991 /// Helper function for generating the LLVM IR that computes the size 992 /// in bytes for a derived type. 
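///
/// This is the classic null-pointer GEP idiom: index element 1 off a null
/// pointer of the type, then convert the resulting address to an integer.
/// Roughly (an illustrative sketch, `T` being the derived type):
///
///   %null = llvm.mlir.null : !llvm.ptr<T>
///   %one  = llvm.mlir.constant(1 : i64) : i64
///   %gep  = llvm.getelementptr %null[%one] : (!llvm.ptr<T>, i64) -> !llvm.ptr<T>
///   %size = llvm.ptrtoint %gep : !llvm.ptr<T> to i64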
993 static mlir::Value 994 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 995 mlir::ConversionPatternRewriter &rewriter) { 996 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 997 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 998 llvm::SmallVector<mlir::Value> args = {one}; 999 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 1000 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 1001 } 1002 1003 namespace { 1004 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1005 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1006 using FIROpConversion::FIROpConversion; 1007 1008 mlir::LogicalResult 1009 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1010 mlir::ConversionPatternRewriter &rewriter) const override { 1011 mlir::Type heapTy = heap.getType(); 1012 mlir::Type ty = convertType(heapTy); 1013 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1014 mlir::Location loc = heap.getLoc(); 1015 auto ity = lowerTy().indexType(); 1016 mlir::Type dataTy = fir::unwrapRefType(heapTy); 1017 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1018 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1019 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1020 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1021 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1022 for (mlir::Value opnd : adaptor.getOperands()) 1023 size = rewriter.create<mlir::LLVM::MulOp>( 1024 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1025 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1026 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1027 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1028 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1029 malloc.getResult(0)); 1030 return mlir::success(); 1031 } 1032 1033 // Compute the (allocation) size of the allocmem type in bytes. 1034 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1035 mlir::ConversionPatternRewriter &rewriter, 1036 mlir::Type llTy) const { 1037 // Use the primitive size, if available. 1038 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1039 if (auto size = 1040 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1041 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1042 1043 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1044 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1045 } 1046 }; 1047 } // namespace 1048 1049 /// Return the LLVMFuncOp corresponding to the standard free call. 
1050 static mlir::LLVM::LLVMFuncOp 1051 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1052 auto module = op->getParentOfType<mlir::ModuleOp>(); 1053 if (mlir::LLVM::LLVMFuncOp freeFunc = 1054 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1055 return freeFunc; 1056 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1057 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1058 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1059 rewriter.getUnknownLoc(), "free", 1060 mlir::LLVM::LLVMFunctionType::get(voidType, 1061 getVoidPtrType(op.getContext()), 1062 /*isVarArg=*/false)); 1063 } 1064 1065 namespace { 1066 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1067 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1068 using FIROpConversion::FIROpConversion; 1069 1070 mlir::LogicalResult 1071 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1072 mlir::ConversionPatternRewriter &rewriter) const override { 1073 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1074 mlir::Location loc = freemem.getLoc(); 1075 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1076 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1077 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1078 rewriter.create<mlir::LLVM::CallOp>( 1079 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1080 rewriter.eraseOp(freemem); 1081 return mlir::success(); 1082 } 1083 }; 1084 } // namespace 1085 1086 /// Common base class for embox to descriptor conversion. 1087 template <typename OP> 1088 struct EmboxCommonConversion : public FIROpConversion<OP> { 1089 using FIROpConversion<OP>::FIROpConversion; 1090 1091 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1092 // The order to find the LLVMFuncOp is as follows: 1093 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1094 // 2. The first ancestor that is a LLVMFuncOp. 1095 mlir::LLVM::LLVMFuncOp 1096 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1097 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1098 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1099 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1100 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1101 } 1102 1103 // Generate an alloca of size 1 and type \p toTy. 
1104 mlir::LLVM::AllocaOp 1105 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1106 mlir::ConversionPatternRewriter &rewriter) const { 1107 auto thisPt = rewriter.saveInsertionPoint(); 1108 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1109 rewriter.setInsertionPointToStart(&func.front()); 1110 auto size = this->genI32Constant(loc, rewriter, 1); 1111 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1112 rewriter.restoreInsertionPoint(thisPt); 1113 return al; 1114 } 1115 1116 static int getCFIAttr(fir::BoxType boxTy) { 1117 auto eleTy = boxTy.getEleTy(); 1118 if (eleTy.isa<fir::PointerType>()) 1119 return CFI_attribute_pointer; 1120 if (eleTy.isa<fir::HeapType>()) 1121 return CFI_attribute_allocatable; 1122 return CFI_attribute_other; 1123 } 1124 1125 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1126 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1127 .template dyn_cast<fir::RecordType>(); 1128 } 1129 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1130 auto recTy = unwrapIfDerived(boxTy); 1131 return recTy && recTy.getNumLenParams() > 0; 1132 } 1133 static bool isDerivedType(fir::BoxType boxTy) { 1134 return static_cast<bool>(unwrapIfDerived(boxTy)); 1135 } 1136 1137 // Get the element size and CFI type code of the boxed value. 1138 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1139 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1140 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1141 auto doInteger = 1142 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1143 int typeCode = fir::integerBitsToTypeCode(width); 1144 return {this->genConstantOffset(loc, rewriter, width / 8), 1145 this->genConstantOffset(loc, rewriter, typeCode)}; 1146 }; 1147 auto doLogical = 1148 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1149 int typeCode = fir::logicalBitsToTypeCode(width); 1150 return {this->genConstantOffset(loc, rewriter, width / 8), 1151 this->genConstantOffset(loc, rewriter, typeCode)}; 1152 }; 1153 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1154 int typeCode = fir::realBitsToTypeCode(width); 1155 return {this->genConstantOffset(loc, rewriter, width / 8), 1156 this->genConstantOffset(loc, rewriter, typeCode)}; 1157 }; 1158 auto doComplex = 1159 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1160 auto typeCode = fir::complexBitsToTypeCode(width); 1161 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1162 this->genConstantOffset(loc, rewriter, typeCode)}; 1163 }; 1164 auto doCharacter = 1165 [&](unsigned width, 1166 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1167 auto typeCode = fir::characterBitsToTypeCode(width); 1168 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1169 if (width == 8) 1170 return {len, typeCodeVal}; 1171 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1172 auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8); 1173 auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len); 1174 auto size = 1175 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64); 1176 return {size, typeCodeVal}; 1177 }; 1178 auto getKindMap = [&]() -> fir::KindMapping & { 1179 return this->lowerTy().getKindMap(); 1180 }; 1181 // Pointer-like types. 1182 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1183 boxEleTy = eleTy; 1184 // Integer types. 
1185 if (fir::isa_integer(boxEleTy)) { 1186 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1187 return doInteger(ty.getWidth()); 1188 auto ty = boxEleTy.cast<fir::IntegerType>(); 1189 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1190 } 1191 // Floating point types. 1192 if (fir::isa_real(boxEleTy)) { 1193 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1194 return doFloat(ty.getWidth()); 1195 auto ty = boxEleTy.cast<fir::RealType>(); 1196 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1197 } 1198 // Complex types. 1199 if (fir::isa_complex(boxEleTy)) { 1200 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1201 return doComplex( 1202 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1203 auto ty = boxEleTy.cast<fir::ComplexType>(); 1204 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1205 } 1206 // Character types. 1207 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1208 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1209 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1210 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1211 return doCharacter(charWidth, len); 1212 } 1213 assert(!lenParams.empty()); 1214 return doCharacter(charWidth, lenParams.back()); 1215 } 1216 // Logical type. 1217 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1218 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1219 // Array types. 1220 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1221 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1222 // Derived-type types. 1223 if (boxEleTy.isa<fir::RecordType>()) { 1224 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1225 this->lowerTy().convertType(boxEleTy)); 1226 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1227 auto one = 1228 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1229 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1230 mlir::ValueRange{one}); 1231 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1232 loc, this->lowerTy().indexType(), gep); 1233 return {eleSize, 1234 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1235 } 1236 // Reference type. 
1237 if (fir::isa_ref_type(boxEleTy)) { 1238 // FIXME: use the target pointer size rather than sizeof(void*) 1239 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1240 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1241 } 1242 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1243 } 1244 1245 /// Basic pattern to write a field in the descriptor 1246 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1247 mlir::Location loc, mlir::Value dest, 1248 llvm::ArrayRef<unsigned> fldIndexes, 1249 mlir::Value value, bool bitcast = false) const { 1250 auto boxTy = dest.getType(); 1251 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1252 if (bitcast) 1253 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1254 else 1255 value = this->integerCast(loc, rewriter, fldTy, value); 1256 llvm::SmallVector<mlir::Attribute, 2> attrs; 1257 for (auto i : fldIndexes) 1258 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1259 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1260 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1261 indexesAttr); 1262 } 1263 1264 inline mlir::Value 1265 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1266 mlir::Location loc, mlir::Value dest, 1267 mlir::Value base) const { 1268 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1269 /*bitCast=*/true); 1270 } 1271 1272 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1273 mlir::Location loc, mlir::Value dest, 1274 unsigned dim, mlir::Value lb) const { 1275 return insertField(rewriter, loc, dest, 1276 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1277 } 1278 1279 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1280 mlir::Location loc, mlir::Value dest, 1281 unsigned dim, mlir::Value extent) const { 1282 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1283 extent); 1284 } 1285 1286 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1287 mlir::Location loc, mlir::Value dest, 1288 unsigned dim, mlir::Value stride) const { 1289 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1290 stride); 1291 } 1292 1293 /// Get the address of the type descriptor global variable that was created by 1294 /// lowering for derived type \p recType. 1295 template <typename BOX> 1296 mlir::Value 1297 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1298 mlir::Location loc, fir::RecordType recType) const { 1299 std::string name = 1300 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1301 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1302 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1303 auto ty = mlir::LLVM::LLVMPointerType::get( 1304 this->lowerTy().convertType(global.getType())); 1305 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1306 global.getSymName()); 1307 } 1308 if (auto global = 1309 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1310 // The global may have already been translated to LLVM. 1311 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1312 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1313 global.getSymName()); 1314 } 1315 // Type info derived types do not have type descriptors since they are the 1316 // types defining type descriptors. 
1317 if (!this->options.ignoreMissingTypeDescriptors && 1318 !fir::NameUniquer::belongsToModule( 1319 name, Fortran::semantics::typeInfoBuiltinModule)) 1320 fir::emitFatalError( 1321 loc, "runtime derived type info descriptor was not generated"); 1322 return rewriter.create<mlir::LLVM::NullOp>( 1323 loc, ::getVoidPtrType(box.getContext())); 1324 } 1325 1326 template <typename BOX> 1327 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1328 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1329 unsigned rank, mlir::ValueRange lenParams) const { 1330 auto loc = box.getLoc(); 1331 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1332 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1333 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1334 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1335 mlir::Value descriptor = 1336 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1337 1338 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1339 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1340 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1341 typeparams.push_back(box.substr()[1]); 1342 } 1343 1344 // Write each of the fields with the appropriate values 1345 auto [eleSize, cfiTy] = 1346 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1347 descriptor = 1348 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1349 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1350 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1351 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1352 this->genI32Constant(loc, rewriter, rank)); 1353 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1354 descriptor = 1355 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1356 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1357 const bool hasAddendum = isDerivedType(boxTy); 1358 descriptor = 1359 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1360 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1361 1362 if (hasAddendum) { 1363 auto isArray = 1364 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1365 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1366 auto typeDesc = 1367 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1368 descriptor = 1369 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1370 /*bitCast=*/true); 1371 } 1372 1373 return {boxTy, descriptor, eleSize}; 1374 } 1375 1376 /// Compute the base address of a substring given the base address of a scalar 1377 /// string and the zero based string lower bound. 
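///
/// When the base points to an LLVM array, a leading zero index is needed so
/// the GEP steps through the array type; otherwise the lower bound alone
/// offsets the element pointer. Schematically (an illustrative sketch):
///
///   base : !llvm.ptr<array<n x i8>>  ->  getelementptr %base[0, %lowerBound]
///   base : !llvm.ptr<i8>             ->  getelementptr %base[%lowerBound]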
1378 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1379 mlir::Location loc, mlir::Value base, 1380 mlir::Value lowerBound) const { 1381 llvm::SmallVector<mlir::Value> gepOperands; 1382 auto baseType = 1383 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1384 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1385 auto idxTy = this->lowerTy().indexType(); 1386 gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0)); 1387 gepOperands.push_back(lowerBound); 1388 } else { 1389 gepOperands.push_back(lowerBound); 1390 } 1391 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1392 } 1393 1394 /// If the embox is not in a globalOp body, allocate storage for the box; 1395 /// store the value inside and return the generated alloca. Return the input 1396 /// value otherwise. 1397 mlir::Value 1398 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1399 mlir::Location loc, mlir::Value boxValue) const { 1400 auto *thisBlock = rewriter.getInsertionBlock(); 1401 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1402 return boxValue; 1403 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1404 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1405 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1406 return alloca; 1407 } 1408 }; 1409 1410 /// Compute the extent of a triplet slice (lb:ub:step). 1411 static mlir::Value 1412 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1413 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1414 mlir::Value step, mlir::Value zero, mlir::Type type) { 1415 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1416 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1417 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1418 // If the resulting extent is negative (`ub-lb` and `step` have different 1419 // signs), zero must be returned instead. 1420 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1421 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1422 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1423 } 1424 1425 /// Create a generic box on a memory reference. This conversions lowers the 1426 /// abstract box to the appropriate, initialized descriptor. 1427 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1428 using EmboxCommonConversion::EmboxCommonConversion; 1429 1430 mlir::LogicalResult 1431 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1432 mlir::ConversionPatternRewriter &rewriter) const override { 1433 assert(!embox.getShape() && "There should be no dims on this embox op"); 1434 auto [boxTy, dest, eleSize] = 1435 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1436 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1437 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1438 adaptor.getOperands()[0]); 1439 if (isDerivedTypeWithLenParams(boxTy)) { 1440 TODO(embox.getLoc(), 1441 "fir.embox codegen of derived with length parameters"); 1442 return mlir::failure(); 1443 } 1444 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1445 rewriter.replaceOp(embox, result); 1446 return mlir::success(); 1447 } 1448 }; 1449 1450 /// Create a generic box on a memory reference. 
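/// Sketch of the effect, for illustration only: a hypothetical
/// `fir.cg.x_embox %mem(%shape) ...` of a one-dimensional array is rewritten
/// into a sequence of llvm.insertvalue operations that fill the descriptor
/// fields (base address, element size, CFI type code, rank, attributes, and one
/// {lower bound, extent, byte stride} triple per dimension), followed by an
/// alloca and store when the op is not inside a global initializer.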
1451 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
1452 using EmboxCommonConversion::EmboxCommonConversion;
1453
1454 mlir::LogicalResult
1455 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor,
1456 mlir::ConversionPatternRewriter &rewriter) const override {
1457 auto [boxTy, dest, eleSize] = consDescriptorPrefix(
1458 xbox, rewriter, xbox.getOutRank(),
1459 adaptor.getOperands().drop_front(xbox.lenParamOffset()));
1460 // Generate the triples in the dims field of the descriptor
1461 mlir::ValueRange operands = adaptor.getOperands();
1462 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64);
1463 mlir::Value base = operands[0];
1464 assert(!xbox.shape().empty() && "must have a shape");
1465 unsigned shapeOffset = xbox.shapeOffset();
1466 bool hasShift = !xbox.shift().empty();
1467 unsigned shiftOffset = xbox.shiftOffset();
1468 bool hasSlice = !xbox.slice().empty();
1469 unsigned sliceOffset = xbox.sliceOffset();
1470 mlir::Location loc = xbox.getLoc();
1471 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
1472 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
1473 mlir::Value prevPtrOff = one;
1474 mlir::Type eleTy = boxTy.getEleTy();
1475 const unsigned rank = xbox.getRank();
1476 llvm::SmallVector<mlir::Value> gepArgs;
1477 unsigned constRows = 0;
1478 mlir::Value ptrOffset = zero;
1479 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
1480 assert(memEleTy.isa<fir::SequenceType>());
1481 auto seqTy = memEleTy.cast<fir::SequenceType>();
1482 mlir::Type seqEleTy = seqTy.getEleTy();
1483 // Adjust the element scaling factor if the element is a dependent type.
1484 if (fir::hasDynamicSize(seqEleTy)) {
1485 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) {
1486 assert(xbox.lenParams().size() == 1);
1487 mlir::LLVM::ConstantOp charSize = genConstantIndex(
1488 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8);
1489 mlir::Value castedLen =
1490 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]);
1491 auto byteOffset =
1492 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen);
1493 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset);
1494 } else if (seqEleTy.isa<fir::RecordType>()) {
1495 // prevPtrOff should be set to the dynamically computed PDT element size.
1496 TODO(loc, "generate call to calculate size of PDT");
1497 } else {
1498 fir::emitFatalError(loc, "unexpected dynamic type");
1499 }
1500 } else {
1501 constRows = seqTy.getConstantRows();
1502 }
1503
1504 const auto hasSubcomp = !xbox.subcomponent().empty();
1505 const bool hasSubstr = !xbox.substr().empty();
1506 // Compute the initial element stride that will be used to compute the step
1507 // in each dimension.
1508 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize);
1509 if (hasSubcomp) {
1510 // We have a subcomponent. The step value needs to be the number of
1511 // bytes per element (which is a derived type).
1512 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy));
1513 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
1514 } else if (hasSubstr) {
1515 // We have a substring. The step value needs to be the number of bytes
1516 // per CHARACTER element.
1517 auto charTy = seqEleTy.cast<fir::CharacterType>();
1518 if (fir::hasDynamicSize(charTy)) {
1519 prevDimByteStride = prevPtrOff;
1520 } else {
1521 prevDimByteStride = genConstantIndex(
1522 loc, i64Ty, rewriter,
1523 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
1524 }
1525 }
1526
1527 // Process the array subspace arguments (shape, shift, etc.), if any,
1528 // translating everything to values in the descriptor wherever the entity
1529 // has a dynamic array dimension.
1530 for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
1531 mlir::Value extent = operands[shapeOffset];
1532 mlir::Value outerExtent = extent;
1533 bool skipNext = false;
1534 if (hasSlice) {
1535 mlir::Value off = operands[sliceOffset];
1536 mlir::Value adj = one;
1537 if (hasShift)
1538 adj = operands[shiftOffset];
1539 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
1540 if (constRows > 0) {
1541 gepArgs.push_back(ao);
1542 } else {
1543 auto dimOff =
1544 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
1545 ptrOffset =
1546 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
1547 }
1548 if (mlir::isa_and_nonnull<fir::UndefOp>(
1549 xbox.slice()[3 * di + 1].getDefiningOp())) {
1550 // This dimension contains a scalar expression in the array slice op.
1551 // The dimension is loop invariant, will be dropped, and will not
1552 // appear in the descriptor.
1553 skipNext = true;
1554 }
1555 }
1556 if (!skipNext) {
1557 // store extent
1558 if (hasSlice)
1559 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
1560 operands[sliceOffset + 1],
1561 operands[sliceOffset + 2], zero, i64Ty);
1562 // Lower bound is normalized to 0 for BIND(C) interoperability.
1563 mlir::Value lb = zero;
1564 const bool isaPointerOrAllocatable =
1565 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>();
1566 // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and
1567 // denormalized descriptors.
1568 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox))
1569 lb = one;
1570 // If there is a shifted origin, and no fir.slice, and this is not
1571 // a normalized descriptor, then use the value from the shift op as
1572 // the lower bound.
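// Illustrative example (not from the sources): embox of a boxed POINTER
// target mapped with `fir.shift %c5` and no slice stores 5 as this
// dimension's lower bound, unless the extent is zero, in which case 1 is
// stored instead, as mandated by the selection below.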
1573 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1574 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1575 lb = operands[shiftOffset]; 1576 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1577 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1578 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1579 lb); 1580 } 1581 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1582 1583 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1584 1585 // store step (scaled by shaped extent) 1586 mlir::Value step = prevDimByteStride; 1587 if (hasSlice) 1588 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1589 operands[sliceOffset + 2]); 1590 dest = insertStride(rewriter, loc, dest, descIdx, step); 1591 ++descIdx; 1592 } 1593 1594 // compute the stride and offset for the next natural dimension 1595 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1596 loc, i64Ty, prevDimByteStride, outerExtent); 1597 if (constRows == 0) 1598 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1599 outerExtent); 1600 else 1601 --constRows; 1602 1603 // increment iterators 1604 ++shapeOffset; 1605 if (hasShift) 1606 ++shiftOffset; 1607 if (hasSlice) 1608 sliceOffset += 3; 1609 } 1610 if (hasSlice || hasSubcomp || hasSubstr) { 1611 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1612 args.append(gepArgs.rbegin(), gepArgs.rend()); 1613 if (hasSubcomp) { 1614 // For each field in the path add the offset to base via the args list. 1615 // In the most general case, some offsets must be computed since 1616 // they are not be known until runtime. 1617 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1618 fir::unwrapPassByRefType(xbox.memref().getType())))) 1619 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1620 args.append(operands.begin() + xbox.subcomponentOffset(), 1621 operands.begin() + xbox.subcomponentOffset() + 1622 xbox.subcomponent().size()); 1623 } 1624 base = 1625 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1626 if (hasSubstr) 1627 base = shiftSubstringBase(rewriter, loc, base, 1628 operands[xbox.substrOffset()]); 1629 } 1630 dest = insertBaseAddress(rewriter, loc, dest, base); 1631 if (isDerivedTypeWithLenParams(boxTy)) 1632 TODO(loc, "fir.embox codegen of derived with length parameters"); 1633 1634 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1635 rewriter.replaceOp(xbox, result); 1636 return mlir::success(); 1637 } 1638 1639 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1640 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1641 /// zero origin lower bound for interoperability with BIND(C). 1642 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1643 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1644 } 1645 }; 1646 1647 /// Create a new box given a box reference. 1648 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1649 using EmboxCommonConversion::EmboxCommonConversion; 1650 1651 mlir::LogicalResult 1652 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1653 mlir::ConversionPatternRewriter &rewriter) const override { 1654 mlir::Location loc = rebox.getLoc(); 1655 mlir::Type idxTy = lowerTy().indexType(); 1656 mlir::Value loweredBox = adaptor.getOperands()[0]; 1657 mlir::ValueRange operands = adaptor.getOperands(); 1658 1659 // Create new descriptor and fill its non-shape related data. 
1660 llvm::SmallVector<mlir::Value, 2> lenParams; 1661 mlir::Type inputEleTy = getInputEleTy(rebox); 1662 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1663 mlir::Value len = 1664 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1665 if (charTy.getFKind() != 1) { 1666 mlir::Value width = 1667 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1668 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1669 } 1670 lenParams.emplace_back(len); 1671 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1672 if (recTy.getNumLenParams() != 0) 1673 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1674 } 1675 auto [boxTy, dest, eleSize] = 1676 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1677 1678 // Read input extents, strides, and base address 1679 llvm::SmallVector<mlir::Value> inputExtents; 1680 llvm::SmallVector<mlir::Value> inputStrides; 1681 const unsigned inputRank = rebox.getRank(); 1682 for (unsigned i = 0; i < inputRank; ++i) { 1683 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1684 llvm::SmallVector<mlir::Value, 3> dimInfo = 1685 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1686 inputExtents.emplace_back(dimInfo[1]); 1687 inputStrides.emplace_back(dimInfo[2]); 1688 } 1689 1690 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1691 mlir::Value baseAddr = 1692 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1693 1694 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1695 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1696 operands, rewriter); 1697 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1698 operands, rewriter); 1699 } 1700 1701 private: 1702 /// Write resulting shape and base address in descriptor, and replace rebox 1703 /// op. 1704 mlir::LogicalResult 1705 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1706 mlir::ValueRange lbounds, mlir::ValueRange extents, 1707 mlir::ValueRange strides, 1708 mlir::ConversionPatternRewriter &rewriter) const { 1709 mlir::Location loc = rebox.getLoc(); 1710 mlir::Value zero = 1711 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1712 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1713 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1714 mlir::Value extent = std::get<0>(iter.value()); 1715 unsigned dim = iter.index(); 1716 mlir::Value lb = one; 1717 if (!lbounds.empty()) { 1718 lb = lbounds[dim]; 1719 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1720 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1721 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1722 }; 1723 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1724 dest = insertExtent(rewriter, loc, dest, dim, extent); 1725 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1726 } 1727 dest = insertBaseAddress(rewriter, loc, dest, base); 1728 mlir::Value result = 1729 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1730 rewriter.replaceOp(rebox, result); 1731 return mlir::success(); 1732 } 1733 1734 // Apply slice given the base address, extents and strides of the input box. 
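// Worked example (illustrative): reboxing array(2:10:2) with the default
// origin of 1 advances the base address by (2 - 1) * input_stride bytes and
// stores extent (10 - 2 + 2) / 2 = 5 and byte stride 2 * input_stride in the
// resulting descriptor.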
1735 mlir::LogicalResult 1736 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1737 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1738 mlir::ValueRange operands, 1739 mlir::ConversionPatternRewriter &rewriter) const { 1740 mlir::Location loc = rebox.getLoc(); 1741 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1742 mlir::Type idxTy = lowerTy().indexType(); 1743 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1744 // Apply subcomponent and substring shift on base address. 1745 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1746 // Cast to inputEleTy* so that a GEP can be used. 1747 mlir::Type inputEleTy = getInputEleTy(rebox); 1748 auto llvmElePtrTy = 1749 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1750 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1751 1752 if (!rebox.subcomponent().empty()) { 1753 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1754 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1755 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1756 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1757 } 1758 if (!rebox.substr().empty()) 1759 base = shiftSubstringBase(rewriter, loc, base, 1760 operands[rebox.substrOffset()]); 1761 } 1762 1763 if (rebox.slice().empty()) 1764 // The array section is of the form array[%component][substring], keep 1765 // the input array extents and strides. 1766 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1767 inputExtents, inputStrides, rewriter); 1768 1769 // Strides from the fir.box are in bytes. 1770 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1771 1772 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1773 // and strides. 1774 llvm::SmallVector<mlir::Value> slicedExtents; 1775 llvm::SmallVector<mlir::Value> slicedStrides; 1776 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1777 const bool sliceHasOrigins = !rebox.shift().empty(); 1778 unsigned sliceOps = rebox.sliceOffset(); 1779 unsigned shiftOps = rebox.shiftOffset(); 1780 auto strideOps = inputStrides.begin(); 1781 const unsigned inputRank = inputStrides.size(); 1782 for (unsigned i = 0; i < inputRank; 1783 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1784 mlir::Value sliceLb = 1785 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1786 mlir::Value inputStride = *strideOps; // already idxTy 1787 // Apply origin shift: base += (lb-shift)*input_stride 1788 mlir::Value sliceOrigin = 1789 sliceHasOrigins 1790 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1791 : one; 1792 mlir::Value diff = 1793 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1794 mlir::Value offset = 1795 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1796 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1797 // Apply upper bound and step if this is a triplet. Otherwise, the 1798 // dimension is dropped and no extents/strides are computed. 
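// For a triplet lb:ub:step the extent computed below is (illustratively)
// max(0, (ub - lb + step) / step), e.g. 2:10:3 yields (10 - 2 + 3) / 3 = 3,
// and the new byte stride is step * input_stride.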
1799 mlir::Value upper = operands[sliceOps + 1]; 1800 const bool isTripletSlice = 1801 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1802 if (isTripletSlice) { 1803 mlir::Value step = 1804 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1805 // extent = ub-lb+step/step 1806 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1807 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1808 sliceUb, step, zero, idxTy); 1809 slicedExtents.emplace_back(extent); 1810 // stride = step*input_stride 1811 mlir::Value stride = 1812 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1813 slicedStrides.emplace_back(stride); 1814 } 1815 } 1816 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1817 slicedExtents, slicedStrides, rewriter); 1818 } 1819 1820 /// Apply a new shape to the data described by a box given the base address, 1821 /// extents and strides of the box. 1822 mlir::LogicalResult 1823 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1824 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1825 mlir::ValueRange operands, 1826 mlir::ConversionPatternRewriter &rewriter) const { 1827 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1828 operands.begin() + rebox.shiftOffset() + 1829 rebox.shift().size()}; 1830 if (rebox.shape().empty()) { 1831 // Only setting new lower bounds. 1832 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1833 inputStrides, rewriter); 1834 } 1835 1836 mlir::Location loc = rebox.getLoc(); 1837 // Strides from the fir.box are in bytes. 1838 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1839 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1840 1841 llvm::SmallVector<mlir::Value> newStrides; 1842 llvm::SmallVector<mlir::Value> newExtents; 1843 mlir::Type idxTy = lowerTy().indexType(); 1844 // First stride from input box is kept. The rest is assumed contiguous 1845 // (it is not possible to reshape otherwise). If the input is scalar, 1846 // which may be OK if all new extents are ones, the stride does not 1847 // matter, use one. 1848 mlir::Value stride = inputStrides.empty() 1849 ? genConstantIndex(loc, idxTy, rewriter, 1) 1850 : inputStrides[0]; 1851 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1852 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1853 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1854 newExtents.emplace_back(extent); 1855 newStrides.emplace_back(stride); 1856 // nextStride = extent * stride; 1857 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1858 } 1859 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1860 rewriter); 1861 } 1862 1863 /// Return scalar element type of the input box. 1864 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1865 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1866 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1867 return seqTy.getEleTy(); 1868 return ty; 1869 } 1870 }; 1871 1872 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1873 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1874 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1875 using FIROpConversion::FIROpConversion; 1876 1877 mlir::LogicalResult 1878 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1879 mlir::ConversionPatternRewriter &rewriter) const override { 1880 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1881 return mlir::failure(); 1882 } 1883 }; 1884 1885 // Code shared between insert_value and extract_value Ops. 1886 struct ValueOpCommon { 1887 // Translate the arguments pertaining to any multidimensional array to 1888 // row-major order for LLVM-IR. 1889 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1890 mlir::Type ty) { 1891 assert(ty && "type is null"); 1892 const auto end = attrs.size(); 1893 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1894 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1895 const auto dim = getDimension(seq); 1896 if (dim > 1) { 1897 auto ub = std::min(i + dim, end); 1898 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1899 i += dim - 1; 1900 } 1901 ty = getArrayElementType(seq); 1902 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1903 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1904 } else { 1905 llvm_unreachable("index into invalid type"); 1906 } 1907 } 1908 } 1909 1910 static llvm::SmallVector<mlir::Attribute> 1911 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1912 mlir::ArrayAttr arrAttr) { 1913 llvm::SmallVector<mlir::Attribute> attrs; 1914 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1915 if (i->isa<mlir::IntegerAttr>()) { 1916 attrs.push_back(*i); 1917 } else { 1918 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1919 ++i; 1920 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1921 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1922 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1923 } 1924 } 1925 return attrs; 1926 } 1927 1928 private: 1929 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1930 unsigned result = 1; 1931 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1932 eleTy; 1933 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1934 ++result; 1935 return result; 1936 } 1937 1938 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1939 auto eleTy = ty.getElementType(); 1940 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1941 eleTy = arrTy.getElementType(); 1942 return eleTy; 1943 } 1944 }; 1945 1946 namespace { 1947 /// Extract a subobject value from an ssa-value of aggregate type 1948 struct ExtractValueOpConversion 1949 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1950 public ValueOpCommon { 1951 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1952 1953 mlir::LogicalResult 1954 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1955 mlir::ConversionPatternRewriter &rewriter) const override { 1956 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1957 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1958 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1959 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1960 extractVal, ty, adaptor.getOperands()[0], position); 1961 return mlir::success(); 1962 } 1963 }; 1964 1965 /// InsertValue is the generalized instruction for the composition of new 1966 /// aggregate type values. 
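/// Illustration (not from the sources): because FIR array coordinates are
/// column-major while LLVM aggregates are indexed row-major, the indices
/// collected from the coor attribute are reversed per array nest by toRowMajor
/// above, e.g. indices [1, 2] into a rank-2 array become position [2, 1].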
1967 struct InsertValueOpConversion 1968 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1969 public ValueOpCommon { 1970 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1971 1972 mlir::LogicalResult 1973 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1974 mlir::ConversionPatternRewriter &rewriter) const override { 1975 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1976 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1977 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1978 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1979 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1980 position); 1981 return mlir::success(); 1982 } 1983 }; 1984 1985 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1986 struct InsertOnRangeOpConversion 1987 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1988 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1989 1990 // Increments an array of subscripts in a row major fasion. 1991 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1992 llvm::SmallVector<uint64_t> &subscripts) const { 1993 for (size_t i = dims.size(); i > 0; --i) { 1994 if (++subscripts[i - 1] < dims[i - 1]) { 1995 return; 1996 } 1997 subscripts[i - 1] = 0; 1998 } 1999 } 2000 2001 mlir::LogicalResult 2002 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2003 mlir::ConversionPatternRewriter &rewriter) const override { 2004 2005 llvm::SmallVector<uint64_t> dims; 2006 auto type = adaptor.getOperands()[0].getType(); 2007 2008 // Iteratively extract the array dimensions from the type. 2009 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2010 dims.push_back(t.getNumElements()); 2011 type = t.getElementType(); 2012 } 2013 2014 llvm::SmallVector<std::uint64_t> lBounds; 2015 llvm::SmallVector<std::uint64_t> uBounds; 2016 2017 // Unzip the upper and lower bound and convert to a row major format. 2018 mlir::DenseIntElementsAttr coor = range.getCoor(); 2019 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2020 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2021 uBounds.push_back(*i++); 2022 lBounds.push_back(*i); 2023 } 2024 2025 auto &subscripts = lBounds; 2026 auto loc = range.getLoc(); 2027 mlir::Value lastOp = adaptor.getOperands()[0]; 2028 mlir::Value insertVal = adaptor.getOperands()[1]; 2029 2030 auto i64Ty = rewriter.getI64Type(); 2031 while (subscripts != uBounds) { 2032 // Convert uint64_t's to Attribute's. 2033 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2034 for (const auto &subscript : subscripts) 2035 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2036 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2037 loc, ty, lastOp, insertVal, 2038 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2039 2040 incrementSubscripts(dims, subscripts); 2041 } 2042 2043 // Convert uint64_t's to Attribute's. 
2044 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2045 for (const auto &subscript : subscripts) 2046 subscriptAttrs.push_back( 2047 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2048 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2049 2050 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2051 range, ty, lastOp, insertVal, 2052 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2053 2054 return mlir::success(); 2055 } 2056 }; 2057 } // namespace 2058 2059 namespace { 2060 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2061 /// shifted etc. array. 2062 /// (See the static restriction on coordinate_of.) array_coor determines the 2063 /// coordinate (location) of a specific element. 2064 struct XArrayCoorOpConversion 2065 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2066 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2067 2068 mlir::LogicalResult 2069 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2070 mlir::ConversionPatternRewriter &rewriter) const override { 2071 auto loc = coor.getLoc(); 2072 mlir::ValueRange operands = adaptor.getOperands(); 2073 unsigned rank = coor.getRank(); 2074 assert(coor.indices().size() == rank); 2075 assert(coor.shape().empty() || coor.shape().size() == rank); 2076 assert(coor.shift().empty() || coor.shift().size() == rank); 2077 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2078 mlir::Type idxTy = lowerTy().indexType(); 2079 unsigned indexOffset = coor.indicesOffset(); 2080 unsigned shapeOffset = coor.shapeOffset(); 2081 unsigned shiftOffset = coor.shiftOffset(); 2082 unsigned sliceOffset = coor.sliceOffset(); 2083 auto sliceOps = coor.slice().begin(); 2084 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2085 mlir::Value prevExt = one; 2086 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2087 mlir::Value offset = zero; 2088 const bool isShifted = !coor.shift().empty(); 2089 const bool isSliced = !coor.slice().empty(); 2090 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2091 2092 // For each dimension of the array, generate the offset calculation. 2093 for (unsigned i = 0; i < rank; ++i, ++indexOffset, ++shapeOffset, 2094 ++shiftOffset, sliceOffset += 3, sliceOps += 3) { 2095 mlir::Value index = 2096 integerCast(loc, rewriter, idxTy, operands[indexOffset]); 2097 mlir::Value lb = 2098 isShifted ? integerCast(loc, rewriter, idxTy, operands[shiftOffset]) 2099 : one; 2100 mlir::Value step = one; 2101 bool normalSlice = isSliced; 2102 // Compute zero based index in dimension i of the element, applying 2103 // potential triplets and lower bounds. 2104 if (isSliced) { 2105 mlir::Value originalUb = *(sliceOps + 1); 2106 normalSlice = 2107 !mlir::isa_and_nonnull<fir::UndefOp>(originalUb.getDefiningOp()); 2108 if (normalSlice) 2109 step = integerCast(loc, rewriter, idxTy, operands[sliceOffset + 2]); 2110 } 2111 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2112 mlir::Value diff = 2113 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2114 if (normalSlice) { 2115 mlir::Value sliceLb = 2116 integerCast(loc, rewriter, idxTy, operands[sliceOffset]); 2117 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2118 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2119 } 2120 // Update the offset given the stride and the zero based index `diff` 2121 // that was just computed. 
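// Illustrative summary: for a boxed base the running offset accumulates
// diff_i * byte_stride_i with strides read from the descriptor, whereas for a
// contiguous, unboxed base it accumulates diff_i * prevExt_i in element units.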
2122 if (baseIsBoxed) {
2123 // Use stride in bytes from the descriptor.
2124 mlir::Value stride =
2125 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter);
2126 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride);
2127 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2128 } else {
2129 // Use stride computed at last iteration.
2130 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt);
2131 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset);
2132 // Compute next stride assuming contiguity of the base array
2133 // (in element number).
2134 auto nextExt = integerCast(loc, rewriter, idxTy, operands[shapeOffset]);
2135 prevExt =
2136 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt);
2137 }
2138 }
2139
2140 // Add computed offset to the base address.
2141 if (baseIsBoxed) {
2142 // Working with byte offsets. The base address is read from the fir.box
2143 // and needs to be cast to i8* to do the pointer arithmetic.
2144 mlir::Type baseTy =
2145 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType());
2146 mlir::Value base =
2147 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter);
2148 mlir::Type voidPtrTy = getVoidPtrType();
2149 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
2150 llvm::SmallVector<mlir::Value> args{offset};
2151 auto addr =
2152 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args);
2153 if (coor.subcomponent().empty()) {
2154 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr);
2155 return mlir::success();
2156 }
2157 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr);
2158 args.clear();
2159 args.push_back(zero);
2160 if (!coor.lenParams().empty()) {
2161 // If type parameters are present, then we don't want to use a GEPOp
2162 // as below, as the LLVM struct type cannot be statically defined.
2163 TODO(loc, "derived type with type parameters");
2164 }
2165 // TODO: array offset subcomponents must be converted to LLVM's
2166 // row-major layout here.
2167 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2168 args.push_back(operands[i]);
2169 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted,
2170 args);
2171 return mlir::success();
2172 }
2173
2174 // The array was not boxed, so it must be contiguous. `offset` is therefore
2175 // an element offset and the base type is kept in the GEP unless the element
2176 // type size is itself dynamic.
2177 mlir::Value base;
2178 if (coor.subcomponent().empty()) {
2179 // No subcomponent.
2180 if (!coor.lenParams().empty()) {
2181 // Type parameters. Adjust element size explicitly.
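// E.g. (illustrative, assuming the default kind mapping): for
// CHARACTER(KIND=2, LEN=n) the element offset computed above is scaled by
// 2 bytes per character and then by the runtime length n before being used
// in the GEP below.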
2182 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType());
2183 assert(eleTy && "result must be a reference-like type");
2184 if (fir::characterWithDynamicLen(eleTy)) {
2185 assert(coor.lenParams().size() == 1);
2186 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize(
2187 eleTy.cast<fir::CharacterType>().getFKind());
2188 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8);
2189 auto scaledBySize =
2190 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling);
2191 auto length =
2192 integerCast(loc, rewriter, idxTy,
2193 adaptor.getOperands()[coor.lenParamsOffset()]);
2194 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize,
2195 length);
2196 } else {
2197 TODO(loc, "compute size of derived type with type parameters");
2198 }
2199 }
2200 // Cast the base address to a pointer to T.
2201 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty,
2202 adaptor.getOperands()[0]);
2203 } else {
2204 // Operand #0 must have a pointer type. For subcomponent slicing, we
2205 // want to cast away the array type and have a plain struct type.
2206 mlir::Type ty0 = adaptor.getOperands()[0].getType();
2207 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
2208 assert(ptrTy && "expected pointer type");
2209 mlir::Type eleTy = ptrTy.getElementType();
2210 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>())
2211 eleTy = arrTy.getElementType();
2212 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
2213 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy,
2214 adaptor.getOperands()[0]);
2215 }
2216 llvm::SmallVector<mlir::Value> args = {offset};
2217 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
2218 args.push_back(operands[i]);
2219 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args);
2220 return mlir::success();
2221 }
2222 };
2223 } // namespace
2224
2225 /// Convert a (memory) reference into a reference to a subobject.
2226 /// The coordinate_of op is a Swiss army knife operation that can be used on
2227 /// (memory) references to records, arrays, complex, etc. as well as boxes.
2228 /// With unboxed arrays, there is the restriction that the array have a static
2229 /// shape in all but the last column.
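/// Example (illustrative only, field and type names are hypothetical):
///   %idx = fir.field_index r, !fir.type<t{i:i32,r:f32}>
///   %addr = fir.coordinate_of %ref, %idx : !fir.ref<f32>
/// lowers to roughly a single LLVM GEP once the field has been resolved to
/// its index in the record type.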
2230 struct CoordinateOpConversion 2231 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2232 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2233 2234 mlir::LogicalResult 2235 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2236 mlir::ConversionPatternRewriter &rewriter) const override { 2237 mlir::ValueRange operands = adaptor.getOperands(); 2238 2239 mlir::Location loc = coor.getLoc(); 2240 mlir::Value base = operands[0]; 2241 mlir::Type baseObjectTy = coor.getBaseType(); 2242 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2243 assert(objectTy && "fir.coordinate_of expects a reference type"); 2244 2245 // Complex type - basically, extract the real or imaginary part 2246 if (fir::isa_complex(objectTy)) { 2247 mlir::LLVM::ConstantOp c0 = 2248 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2249 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2250 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2251 rewriter.replaceOp(coor, gep); 2252 return mlir::success(); 2253 } 2254 2255 // Boxed type - get the base pointer from the box 2256 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2257 return doRewriteBox(coor, ty, operands, loc, rewriter); 2258 2259 // Reference, pointer or a heap type 2260 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2261 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2262 2263 return rewriter.notifyMatchFailure( 2264 coor, "fir.coordinate_of base operand has unsupported type"); 2265 } 2266 2267 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2268 return fir::hasDynamicSize(ty) 2269 ? op.getDefiningOp() 2270 ->getAttrOfType<mlir::IntegerAttr>("field") 2271 .getInt() 2272 : getIntValue(op); 2273 } 2274 2275 static int64_t getIntValue(mlir::Value val) { 2276 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2277 mlir::Operation *defop = val.getDefiningOp(); 2278 2279 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2280 return constOp.value(); 2281 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2282 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2283 return attr.getValue().getSExtValue(); 2284 fir::emitFatalError(val.getLoc(), "must be a constant"); 2285 } 2286 2287 static bool hasSubDimensions(mlir::Type type) { 2288 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2289 } 2290 2291 /// Check whether this form of `!fir.coordinate_of` is supported. These 2292 /// additional checks are required, because we are not yet able to convert 2293 /// all valid forms of `!fir.coordinate_of`. 2294 /// TODO: Either implement the unsupported cases or extend the verifier 2295 /// in FIROps.cpp instead. 
2296 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) {
2297 const std::size_t numOfCoors = coors.size();
2298 std::size_t i = 0;
2299 bool subEle = false;
2300 bool ptrEle = false;
2301 for (; i < numOfCoors; ++i) {
2302 mlir::Value nxtOpnd = coors[i];
2303 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
2304 subEle = true;
2305 i += arrTy.getDimension() - 1;
2306 type = arrTy.getEleTy();
2307 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) {
2308 subEle = true;
2309 type = recTy.getType(getFieldNumber(recTy, nxtOpnd));
2310 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) {
2311 subEle = true;
2312 type = tupTy.getType(getIntValue(nxtOpnd));
2313 } else {
2314 ptrEle = true;
2315 }
2316 }
2317 if (ptrEle)
2318 return (!subEle) && (numOfCoors == 1);
2319 return subEle && (i >= numOfCoors);
2320 }
2321
2322 /// Walk the abstract memory layout and determine if the path traverses any
2323 /// array types with unknown shape. Return true iff all the array types have a
2324 /// constant shape along the path.
2325 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) {
2326 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) {
2327 mlir::Value nxtOpnd = coors[i];
2328 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) {
2329 if (fir::sequenceWithNonConstantShape(arrTy))
2330 return false;
2331 i += arrTy.getDimension() - 1;
2332 type = arrTy.getEleTy();
2333 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) {
2334 type = strTy.getType(getFieldNumber(strTy, nxtOpnd));
2335 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) {
2336 type = strTy.getType(getIntValue(nxtOpnd));
2337 } else {
2338 return true;
2339 }
2340 }
2341 return true;
2342 }
2343
2344 private:
2345 mlir::LogicalResult
2346 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands,
2347 mlir::Location loc,
2348 mlir::ConversionPatternRewriter &rewriter) const {
2349 mlir::Type boxObjTy = coor.getBaseType();
2350 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`");
2351
2352 mlir::Value boxBaseAddr = operands[0];
2353
2354 // 1. SPECIAL CASE (uses `fir.len_param_index`):
2355 //      %box = ... : !fir.box<!fir.type<derived{len1:i32}>>
2356 //      %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}>
2357 //      %addr = coordinate_of %box, %lenp
2358 if (coor.getNumOperands() == 2) {
2359 mlir::Operation *coordinateDef =
2360 (*coor.getCoor().begin()).getDefiningOp();
2361 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef))
2362 TODO(loc,
2363 "fir.coordinate_of - fir.len_param_index is not supported yet");
2364 }
2365
2366 // 2. GENERAL CASE:
2367 // 2.1. (`fir.array`)
2368 //      %box = ... : !fir.box<!fir.array<?xU>>
2369 //      %idx = ... : index
2370 //      %resultAddr = coordinate_of %box, %idx : !fir.ref<U>
2371 // 2.2 (`fir.derived`)
2372 //      %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>>
2373 //      %idx = ... : i32
2374 //      %resultAddr = coordinate_of %box, %idx : !fir.ref<i32>
2375 // 2.3 (`fir.derived` inside `fir.array`)
2376 //      %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, field_2:f32}>>>
2377 //      %idx1 = ... : index    %idx2 = ... : i32
2378 //      %resultAddr = coordinate_of %box, %idx1, %idx2 : !fir.ref<f32>
2379 // 2.4. TODO: Either document or disable any other case that the following
2380 // implementation might convert.
2381 mlir::LLVM::ConstantOp c0 = 2382 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2383 mlir::Value resultAddr = 2384 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2385 boxBaseAddr, rewriter); 2386 // Component Type 2387 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2388 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2389 2390 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2391 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2392 if (i != 1) 2393 TODO(loc, "fir.array nested inside other array and/or derived type"); 2394 // Applies byte strides from the box. Ignore lower bound from box 2395 // since fir.coordinate_of indexes are zero based. Lowering takes care 2396 // of lower bound aspects. This both accounts for dynamically sized 2397 // types and non contiguous arrays. 2398 auto idxTy = lowerTy().indexType(); 2399 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2400 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2401 index < lastIndex; ++index) { 2402 mlir::Value stride = 2403 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2404 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2405 operands[index], stride); 2406 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2407 } 2408 auto voidPtrBase = 2409 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2410 llvm::SmallVector<mlir::Value> args = {off}; 2411 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2412 voidPtrBase, args); 2413 i += arrTy.getDimension() - 1; 2414 cpnTy = arrTy.getEleTy(); 2415 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2416 auto recRefTy = 2417 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2418 mlir::Value nxtOpnd = operands[i]; 2419 auto memObj = 2420 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2421 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2422 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2423 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2424 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2425 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2426 args); 2427 resultAddr = 2428 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2429 } else { 2430 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2431 } 2432 } 2433 2434 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2435 return mlir::success(); 2436 } 2437 2438 mlir::LogicalResult 2439 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2440 mlir::ValueRange operands, mlir::Location loc, 2441 mlir::ConversionPatternRewriter &rewriter) const { 2442 mlir::Type baseObjectTy = coor.getBaseType(); 2443 2444 // Component Type 2445 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2446 bool hasSubdimension = hasSubDimensions(cpnTy); 2447 bool columnIsDeferred = !hasSubdimension; 2448 2449 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2450 TODO(loc, "unsupported combination of coordinate operands"); 2451 2452 const bool hasKnownShape = 2453 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2454 2455 // If only the column is `?`, then we can simply place the column value in 2456 // the 0-th GEP position. 
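// Illustrative example: for a base of type !fir.array<10x?xi32> only the last
// (column) extent is unknown, so the deferred column index can still be used
// directly as the leading GEP index below.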
2457 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2458 if (!hasKnownShape) { 2459 const unsigned sz = arrTy.getDimension(); 2460 if (arraysHaveKnownShape(arrTy.getEleTy(), 2461 operands.drop_front(1 + sz))) { 2462 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2463 bool allConst = true; 2464 for (unsigned i = 0; i < sz - 1; ++i) { 2465 if (shape[i] < 0) { 2466 allConst = false; 2467 break; 2468 } 2469 } 2470 if (allConst) 2471 columnIsDeferred = true; 2472 } 2473 } 2474 } 2475 2476 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2477 return mlir::emitError( 2478 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2479 2480 if (hasKnownShape || columnIsDeferred) { 2481 llvm::SmallVector<mlir::Value> offs; 2482 if (hasKnownShape && hasSubdimension) { 2483 mlir::LLVM::ConstantOp c0 = 2484 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2485 offs.push_back(c0); 2486 } 2487 llvm::Optional<int> dims; 2488 llvm::SmallVector<mlir::Value> arrIdx; 2489 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2490 mlir::Value nxtOpnd = operands[i]; 2491 2492 if (!cpnTy) 2493 return mlir::emitError(loc, "invalid coordinate/check failed"); 2494 2495 // check if the i-th coordinate relates to an array 2496 if (dims) { 2497 arrIdx.push_back(nxtOpnd); 2498 int dimsLeft = *dims; 2499 if (dimsLeft > 1) { 2500 dims = dimsLeft - 1; 2501 continue; 2502 } 2503 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2504 // append array range in reverse (FIR arrays are column-major) 2505 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2506 arrIdx.clear(); 2507 dims.reset(); 2508 continue; 2509 } 2510 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2511 int d = arrTy.getDimension() - 1; 2512 if (d > 0) { 2513 dims = d; 2514 arrIdx.push_back(nxtOpnd); 2515 continue; 2516 } 2517 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2518 offs.push_back(nxtOpnd); 2519 continue; 2520 } 2521 2522 // check if the i-th coordinate relates to a field 2523 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2524 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2525 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2526 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2527 else 2528 cpnTy = nullptr; 2529 2530 offs.push_back(nxtOpnd); 2531 } 2532 if (dims) 2533 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2534 mlir::Value base = operands[0]; 2535 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2536 rewriter.replaceOp(coor, retval); 2537 return mlir::success(); 2538 } 2539 2540 return mlir::emitError( 2541 loc, "fir.coordinate_of base operand has unsupported type"); 2542 } 2543 }; 2544 2545 /// Convert `fir.field_index`. The conversion depends on whether the size of 2546 /// the record is static or dynamic. 2547 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2548 using FIROpConversion::FIROpConversion; 2549 2550 // NB: most field references should be resolved by this point 2551 mlir::LogicalResult 2552 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2553 mlir::ConversionPatternRewriter &rewriter) const override { 2554 auto recTy = field.getOnType().cast<fir::RecordType>(); 2555 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2556 2557 if (!fir::hasDynamicSize(recTy)) { 2558 // Derived type has compile-time constant layout. Return index of the 2559 // component type in the parent type (to be used in GEP). 
2560 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset(
2561 field.getLoc(), rewriter, index)});
2562 return mlir::success();
2563 }
2564
2565 // Derived type has a dynamic layout. Call the compiler-generated function
2566 // to determine the byte offset of the field at runtime.
2567 // This returns a non-constant.
2568 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get(
2569 field.getContext(), getOffsetMethodName(recTy, field.getFieldId()));
2570 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr);
2571 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr(
2572 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index));
2573 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
2574 field, lowerTy().offsetType(), adaptor.getOperands(),
2575 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr});
2576 return mlir::success();
2577 }
2578
2579 // Reconstruct the name of the compiler-generated method that computes the
2580 // field offset.
2581 inline static std::string getOffsetMethodName(fir::RecordType recTy,
2582 llvm::StringRef field) {
2583 return recTy.getName().str() + "P." + field.str() + ".offset";
2584 }
2585 };
2586
2587 /// Convert `fir.end`
2588 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> {
2589 using FIROpConversion::FIROpConversion;
2590
2591 mlir::LogicalResult
2592 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor,
2593 mlir::ConversionPatternRewriter &rewriter) const override {
2594 TODO(firEnd.getLoc(), "fir.end codegen");
2595 return mlir::failure();
2596 }
2597 };
2598
2599 /// Lower `fir.gentypedesc` to a global constant.
2600 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> {
2601 using FIROpConversion::FIROpConversion;
2602
2603 mlir::LogicalResult
2604 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor,
2605 mlir::ConversionPatternRewriter &rewriter) const override {
2606 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen");
2607 return mlir::failure();
2608 }
2609 };
2610
2611 /// Lower `fir.has_value` operation to `llvm.return` operation.
2612 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> {
2613 using FIROpConversion::FIROpConversion;
2614
2615 mlir::LogicalResult
2616 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor,
2617 mlir::ConversionPatternRewriter &rewriter) const override {
2618 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op,
2619 adaptor.getOperands());
2620 return mlir::success();
2621 }
2622 };
2623
2624 /// Lower `fir.global` operation to `llvm.global` operation.
2625 /// `fir.insert_on_range` operations are replaced with a constant dense
2626 /// attribute if they are applied on the full range.
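/// Sketch of the effect (illustrative): a fir.global whose initializer is a
/// single fir.insert_on_range covering every element of a !fir.array<4xi32>
/// with the constant 0 is rewritten so that the initializer region produces a
/// single constant carrying a dense<0> elements attribute instead of
/// materializing each insertion separately.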
2627 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2628 using FIROpConversion::FIROpConversion; 2629 2630 mlir::LogicalResult 2631 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2632 mlir::ConversionPatternRewriter &rewriter) const override { 2633 auto tyAttr = convertType(global.getType()); 2634 if (global.getType().isa<fir::BoxType>()) 2635 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2636 auto loc = global.getLoc(); 2637 mlir::Attribute initAttr; 2638 if (global.getInitVal()) 2639 initAttr = *global.getInitVal(); 2640 auto linkage = convertLinkage(global.getLinkName()); 2641 auto isConst = global.getConstant().has_value(); 2642 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2643 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2644 auto &gr = g.getInitializerRegion(); 2645 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2646 if (!gr.empty()) { 2647 // Replace insert_on_range with a constant dense attribute if the 2648 // initialization is on the full range. 2649 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2650 for (auto insertOp : insertOnRangeOps) { 2651 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2652 auto seqTyAttr = convertType(insertOp.getType()); 2653 auto *op = insertOp.getVal().getDefiningOp(); 2654 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2655 if (!constant) { 2656 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2657 if (!convertOp) 2658 continue; 2659 constant = mlir::cast<mlir::arith::ConstantOp>( 2660 convertOp.getValue().getDefiningOp()); 2661 } 2662 mlir::Type vecType = mlir::VectorType::get( 2663 insertOp.getType().getShape(), constant.getType()); 2664 auto denseAttr = mlir::DenseElementsAttr::get( 2665 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2666 rewriter.setInsertionPointAfter(insertOp); 2667 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2668 insertOp, seqTyAttr, denseAttr); 2669 } 2670 } 2671 } 2672 rewriter.eraseOp(global); 2673 return mlir::success(); 2674 } 2675 2676 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2677 fir::SequenceType seqTy) const { 2678 auto extents = seqTy.getShape(); 2679 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2680 return false; 2681 auto cur_index = indexes.value_begin<int64_t>(); 2682 for (unsigned i = 0; i < indexes.size(); i += 2) { 2683 if (*(cur_index++) != 0) 2684 return false; 2685 if (*(cur_index++) != extents[i / 2] - 1) 2686 return false; 2687 } 2688 return true; 2689 } 2690 2691 // TODO: String comparaison should be avoided. Replace linkName with an 2692 // enumeration. 
2693 mlir::LLVM::Linkage 2694 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2695 if (optLinkage) { 2696 auto name = *optLinkage; 2697 if (name == "internal") 2698 return mlir::LLVM::Linkage::Internal; 2699 if (name == "linkonce") 2700 return mlir::LLVM::Linkage::Linkonce; 2701 if (name == "linkonce_odr") 2702 return mlir::LLVM::Linkage::LinkonceODR; 2703 if (name == "common") 2704 return mlir::LLVM::Linkage::Common; 2705 if (name == "weak") 2706 return mlir::LLVM::Linkage::Weak; 2707 } 2708 return mlir::LLVM::Linkage::External; 2709 } 2710 }; 2711 2712 /// `fir.load` --> `llvm.load` 2713 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2714 using FIROpConversion::FIROpConversion; 2715 2716 mlir::LogicalResult 2717 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2718 mlir::ConversionPatternRewriter &rewriter) const override { 2719 // fir.box is a special case because it is considered as an ssa values in 2720 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2721 // and fir.box end up being the same llvm types and loading a 2722 // fir.ref<fir.box> is actually a no op in LLVM. 2723 if (load.getType().isa<fir::BoxType>()) { 2724 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2725 } else { 2726 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2727 load, convertType(load.getType()), adaptor.getOperands(), 2728 load->getAttrs()); 2729 } 2730 return mlir::success(); 2731 } 2732 }; 2733 2734 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2735 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2736 /// math flags? 2737 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2738 using FIROpConversion::FIROpConversion; 2739 2740 mlir::LogicalResult 2741 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2742 mlir::ConversionPatternRewriter &rewriter) const override { 2743 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2744 return mlir::success(); 2745 } 2746 }; 2747 2748 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2749 llvm::Optional<mlir::ValueRange> destOps, 2750 mlir::ConversionPatternRewriter &rewriter, 2751 mlir::Block *newBlock) { 2752 if (destOps) 2753 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, *destOps, newBlock, 2754 mlir::ValueRange()); 2755 else 2756 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2757 } 2758 2759 template <typename A, typename B> 2760 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2761 mlir::ConversionPatternRewriter &rewriter) { 2762 if (destOps) 2763 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, *destOps, dest); 2764 else 2765 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2766 } 2767 2768 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2769 mlir::Block *dest, 2770 llvm::Optional<mlir::ValueRange> destOps, 2771 mlir::ConversionPatternRewriter &rewriter) { 2772 auto *thisBlock = rewriter.getInsertionBlock(); 2773 auto *newBlock = createBlock(rewriter, dest); 2774 rewriter.setInsertionPointToEnd(thisBlock); 2775 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2776 rewriter.setInsertionPointToEnd(newBlock); 2777 } 2778 2779 /// Conversion of `fir.select_case` 2780 /// 2781 /// The `fir.select_case` operation is converted to a if-then-else ladder. 2782 /// Depending on the case condition type, one or several comparison and 2783 /// conditional branching can be generated. 
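/// For instance (illustrative), `case (:3)` on an i32 selector produces a
/// single `llvm.icmp "sle" %selector, %c3` followed by a conditional branch to
/// the case block, with the false edge chaining to the next comparison.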
struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    unsigned conds = caseOp.getNumConditions();
    llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue();
    // Type can be CHARACTER, INTEGER, or LOGICAL (C1145)
    auto ty = caseOp.getSelector().getType();
    if (ty.isa<fir::CharacterType>()) {
      TODO(caseOp.getLoc(), "fir.select_case codegen with character type");
      return mlir::failure();
    }
    mlir::Value selector = caseOp.getSelector(adaptor.getOperands());
    auto loc = caseOp.getLoc();
    for (unsigned t = 0; t != conds; ++t) {
      mlir::Block *dest = caseOp.getSuccessor(t);
      llvm::Optional<mlir::ValueRange> destOps =
          caseOp.getSuccessorOperands(adaptor.getOperands(), t);
      llvm::Optional<mlir::ValueRange> cmpOps =
          *caseOp.getCompareOperands(adaptor.getOperands(), t);
      mlir::Value caseArg = *(cmpOps->begin());
      mlir::Attribute attr = cases[t];
      if (attr.isa<fir::PointIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::LowerBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::UpperBoundAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg);
        genCaseLadderStep(loc, cmp, dest, destOps, rewriter);
        continue;
      }
      if (attr.isa<fir::ClosedIntervalAttr>()) {
        auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector);
        auto *thisBlock = rewriter.getInsertionBlock();
        auto *newBlock1 = createBlock(rewriter, dest);
        auto *newBlock2 = createBlock(rewriter, dest);
        rewriter.setInsertionPointToEnd(thisBlock);
        rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock1);
        mlir::Value caseArg0 = *(cmpOps->begin() + 1);
        auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>(
            loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0);
        genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2);
        rewriter.setInsertionPointToEnd(newBlock2);
        continue;
      }
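      // A unit attribute marks the default case; it must be the last
      // condition and lowers to an unconditional branch.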
      assert(attr.isa<mlir::UnitAttr>());
      assert((t + 1 == conds) && "unit must be last");
      genBrOp(caseOp, dest, destOps, rewriter);
    }
    return mlir::success();
  }
};

template <typename OP>
static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select,
                                  typename OP::Adaptor adaptor,
                                  mlir::ConversionPatternRewriter &rewriter) {
  unsigned conds = select.getNumConditions();
  auto cases = select.getCases().getValue();
  mlir::Value selector = adaptor.getSelector();
  auto loc = select.getLoc();
  assert(conds > 0 && "select must have cases");

  llvm::SmallVector<mlir::Block *> destinations;
  llvm::SmallVector<mlir::ValueRange> destinationsOperands;
  mlir::Block *defaultDestination;
  mlir::ValueRange defaultOperands;
  llvm::SmallVector<int32_t> caseValues;

  for (unsigned t = 0; t != conds; ++t) {
    mlir::Block *dest = select.getSuccessor(t);
    auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t);
    const mlir::Attribute &attr = cases[t];
    if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) {
      destinations.push_back(dest);
      destinationsOperands.push_back(destOps ? *destOps : mlir::ValueRange{});
      caseValues.push_back(intAttr.getInt());
      continue;
    }
    assert(attr.template dyn_cast_or_null<mlir::UnitAttr>());
    assert((t + 1 == conds) && "unit must be last");
    defaultDestination = dest;
    defaultOperands = destOps ? *destOps : mlir::ValueRange{};
  }

  // LLVM::SwitchOp takes an i32 type for the selector.
  if (select.getSelector().getType() != rewriter.getI32Type())
    selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(),
                                                    selector);

  rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>(
      select, selector,
      /*defaultDestination=*/defaultDestination,
      /*defaultOperands=*/defaultOperands,
      /*caseValues=*/caseValues,
      /*caseDestinations=*/destinations,
      /*caseOperands=*/destinationsOperands,
      /*branchWeights=*/llvm::ArrayRef<std::int32_t>());
}
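
// For illustration only (operands and types abbreviated), a `fir.select`
// with two explicit cases and a default such as
//   fir.select %sel : i32 [1, ^bb1, 2, ^bb2, unit, ^bb3]
// is rewritten by the helper above into a single switch:
//   llvm.switch %sel : i32, ^bb3 [
//     1: ^bb1,
//     2: ^bb2
//   ]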

/// Conversion of `fir.select` to `llvm.switch`.
struct SelectOpConversion : public FIROpConversion<fir::SelectOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Conversion of `fir.select_rank` to `llvm.switch`.
struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter);
    return mlir::success();
  }
};

/// Lower `fir.select_type` to LLVM IR dialect. `fir.select_type` is expected
/// to have been converted earlier in the pipeline, so reaching this pattern
/// is an error.
struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::emitError(select.getLoc(),
                    "fir.select_type should have already been converted");
    return mlir::failure();
  }
};

/// `fir.store` --> `llvm.store`
struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    if (store.getValue().getType().isa<fir::BoxType>()) {
      // A fir.box value is actually in memory; load it first before storing
      // it.
      mlir::Location loc = store.getLoc();
      mlir::Type boxPtrTy = adaptor.getOperands()[0].getType();
      auto val = rewriter.create<mlir::LLVM::LoadOp>(
          loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(),
          adaptor.getOperands()[0]);
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, val, adaptor.getOperands()[1]);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>(
          store, adaptor.getOperands()[0], adaptor.getOperands()[1]);
    }
    return mlir::success();
  }
};

namespace {

/// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for
/// the character buffer and one for the buffer length.
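///
/// Illustrative sketch only (the struct type depends on how the boxchar was
/// lowered); for a tuple of type `!llvm.struct<(ptr<i8>, i64)>` this yields
/// roughly:
/// ```
/// %buffer = llvm.extractvalue %tuple[0] : !llvm.struct<(ptr<i8>, i64)>
/// %len    = llvm.extractvalue %tuple[1] : !llvm.struct<(ptr<i8>, i64)>
/// ```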
struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto *ctx = unboxchar.getContext();

    mlir::Type lenTy = convertType(unboxchar.getType(1));
    mlir::Value tuple = adaptor.getOperands()[0];
    mlir::Type tupleTy = tuple.getType();

    mlir::Location loc = unboxchar.getLoc();
    mlir::Value ptrToBuffer =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0);

    mlir::LLVM::ExtractValueOp len =
        genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len);

    rewriter.replaceOp(unboxchar,
                       llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast});
    return mlir::success();
  }
};

/// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its
/// components.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(unboxproc.getLoc(), "fir.unboxproc codegen");
    return mlir::failure();
  }
};

/// Convert to LLVM IR dialect `undef`.
struct UndefOpConversion : public FIROpConversion<fir::UndefOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UndefOp undef, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>(
        undef, convertType(undef.getType()));
    return mlir::success();
  }
};

struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ZeroOp zero, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(zero.getType());
    if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
    } else if (ty.isa<mlir::IntegerType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::IntegerAttr::get(zero.getType(), 0));
    } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
          zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0));
    } else {
      // TODO: create ConstantAggregateZero for FIR aggregate/array types.
      return rewriter.notifyMatchFailure(
          zero,
          "conversion of fir.zero with aggregate type not implemented yet");
    }
    return mlir::success();
  }
};

/// `fir.unreachable` --> `llvm.unreachable`
struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach);
    return mlir::success();
  }
};

/// `fir.is_present` -->
/// ```
/// %0 = llvm.mlir.constant(0 : i64)
/// %1 = llvm.ptrtoint %arg0 : !llvm.ptr<...> to i64
/// %2 = llvm.icmp "ne" %1, %0 : i64
/// ```
struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type idxTy = lowerTy().indexType();
    mlir::Location loc = isPresent.getLoc();
    auto ptr = adaptor.getOperands()[0];

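    // A boxchar argument is a (buffer, length) tuple; presence is determined
    // from the buffer pointer component.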
    if (isPresent.getVal().getType().isa<fir::BoxCharType>()) {
      auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());

      mlir::Type ty = structTy.getBody()[0];
      mlir::MLIRContext *ctx = isPresent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0);
    }
    mlir::LLVM::ConstantOp c0 =
        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
    auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);

    return mlir::success();
  }
};

/// Create value signaling an absent optional argument in a call, e.g.
/// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>`
struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AbsentOp absent, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type ty = convertType(absent.getType());
    mlir::Location loc = absent.getLoc();

    if (absent.getType().isa<fir::BoxCharType>()) {
      auto structTy = ty.cast<mlir::LLVM::LLVMStructType>();
      assert(!structTy.isOpaque() && !structTy.getBody().empty());
      auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
      auto nullField =
          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
      mlir::MLIRContext *ctx = absent.getContext();
      auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
          absent, ty, undefStruct, nullField, c0);
    } else {
      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
    }
    return mlir::success();
  }
};

//
// Primitive operations on Complex types
//

/// Generate inline code for complex addition/subtraction
template <typename LLVMOP, typename OPTY>
static mlir::LLVM::InsertValueOp
complexSum(OPTY sumop, mlir::ValueRange opnds,
           mlir::ConversionPatternRewriter &rewriter,
           fir::LLVMTypeConverter &lowering) {
  mlir::Value a = opnds[0];
  mlir::Value b = opnds[1];
  auto loc = sumop.getLoc();
  auto ctx = sumop.getContext();
  auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
  auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
  mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType()));
  mlir::Type ty = lowering.convertType(sumop.getType());
  auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
  auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
  auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
  auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
  auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1);
  auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1);
  auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
  auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0);
  return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1);
}
} // namespace

namespace {
struct AddcOpConversion : public FIROpConversion<fir::AddcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) + (x' + iy')
    // result: (x + x') + i(y + y')
    auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(addc, r.getResult());
    return mlir::success();
  }
};

struct SubcOpConversion : public FIROpConversion<fir::SubcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: (x + iy) - (x' + iy')
    // result: (x - x') + i(y - y')
    auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(),
                                            rewriter, lowerTy());
    rewriter.replaceOp(subc, r.getResult());
    return mlir::success();
  }
};

/// Inlined complex multiply
struct MulcOpConversion : public FIROpConversion<fir::MulcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __muldc3?
    // given: (x + iy) * (x' + iy')
    // result: (xx' - yy') + i(xy' + yx')
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = mulc.getLoc();
    auto *ctx = mulc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType()));
    mlir::Type ty = convertType(mulc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(mulc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex division
struct DivcOpConversion : public FIROpConversion<fir::DivcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // TODO: Can we use a call to __divdc3 instead?
    // Just generate inline code for now.
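    // Note: the straightforward formula below may overflow or underflow for
    // operands with very large or very small magnitudes; a scaled algorithm
    // (as in a __divdc3-style library routine) would be more robust.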
    // given: (x + iy) / (x' + iy')
    // result: ((xx' + yy') / d) + i((yx' - xy') / d) where d = x'x' + y'y'
    mlir::Value a = adaptor.getOperands()[0];
    mlir::Value b = adaptor.getOperands()[1];
    auto loc = divc.getLoc();
    auto *ctx = divc.getContext();
    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    mlir::Type eleTy = convertType(getComplexEleTy(divc.getType()));
    mlir::Type ty = convertType(divc.getType());
    auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0);
    auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1);
    auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0);
    auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1);
    auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1);
    auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1);
    auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1);
    auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1);
    auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1);
    auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1);
    auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1);
    auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy);
    auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy);
    auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d);
    auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d);
    auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0);
    auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1);
    rewriter.replaceOp(divc, r0.getResult());
    return mlir::success();
  }
};

/// Inlined complex negation
struct NegcOpConversion : public FIROpConversion<fir::NegcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    // given: -(x + iy)
    // result: -x - iy
    auto *ctxt = neg.getContext();
    auto eleTy = convertType(getComplexEleTy(neg.getType()));
    auto ty = convertType(neg.getType());
    auto loc = neg.getLoc();
    mlir::Value o0 = adaptor.getOperands()[0];
    auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0);
    auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1);
    auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp);
    auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip);
    auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip,
                                                           c1);
    return mlir::success();
  }
};

/// Conversion pattern for operations that must be dead. The information in
/// these operations is consumed by other operations during earlier rewrites,
/// so at this point they should not have any remaining uses.
/// These operations are normally dead after the pre-codegen pass.
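/// For example (illustrative), a `fir.shape` that only fed a `fir.embox` has
/// typically been absorbed into the rewritten embox by the pre-codegen pass,
/// leaving the shape operation without uses.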
template <typename FromOp>
struct MustBeDeadConversion : public FIROpConversion<FromOp> {
  explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering,
                                const fir::FIRToLLVMPassOptions &options)
      : FIROpConversion<FromOp>(lowering, options) {}
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    if (!op->getUses().empty())
      return rewriter.notifyMatchFailure(op, "op must be dead");
    rewriter.eraseOp(op);
    return mlir::success();
  }
};

struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> {
  using MustBeDeadConversion::MustBeDeadConversion;
};

} // namespace

namespace {
/// Convert FIR dialect to LLVM dialect
///
/// This pass lowers all FIR dialect operations to LLVM IR dialect. Residual
/// `func`, `arith`, and `cf` dialect operations are lowered with the
/// corresponding upstream conversion patterns.
class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> {
public:
  FIRToLLVMLowering() = default;
  FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {}
  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto mod = getModule();
    if (!forcedTargetTriple.empty())
      fir::setTargetTriple(mod, forcedTargetTriple);

    auto *context = getModule().getContext();
    fir::LLVMTypeConverter typeConverter{getModule()};
    mlir::RewritePatternSet pattern(context);
    pattern.insert<
        AbsentOpConversion, AddcOpConversion, AddrOfOpConversion,
        AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion,
        BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion,
        BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion,
        BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion,
        CallOpConversion, CmpcOpConversion, ConstcOpConversion,
        ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion,
        DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion,
        EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion,
        ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion,
        FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion,
        GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion,
        InsertValueOpConversion, IsPresentOpConversion,
        LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion,
        NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion,
        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
        UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
        XEmboxOpConversion, XReboxOpConversion,
        ZeroOpConversion>(typeConverter, options);
    mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern);
    mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern);
    mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                            pattern);
    mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter,
                                                          pattern);
    mlir::ConversionTarget target{*context};
    target.addLegalDialect<mlir::LLVM::LLVMDialect>();
    // The OpenMP dialect is legal for operations without regions; for those
    // that contain regions, it is legal if the regions contain only LLVM
    // dialect operations. Add the OpenMP dialect as a legal dialect for the
    // conversion and legalize the conversion of OpenMP operations without
    // regions.
    mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter);
    target.addLegalDialect<mlir::omp::OpenMPDialect>();

    // Required NOPs for applying a full conversion.
    target.addLegalOp<mlir::ModuleOp>();

    // Apply the patterns.
    if (mlir::failed(mlir::applyFullConversion(getModule(), target,
                                               std::move(pattern)))) {
      signalPassFailure();
    }
  }

private:
  fir::FIRToLLVMPassOptions options;
};

/// Lower from LLVM IR dialect to proper LLVM-IR and dump the module
struct LLVMIRLoweringPass
    : public mlir::PassWrapper<LLVMIRLoweringPass,
                               mlir::OperationPass<mlir::ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass)

  LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p)
      : output{output}, printer{p} {}

  mlir::ModuleOp getModule() { return getOperation(); }

  void runOnOperation() override final {
    auto *ctx = getModule().getContext();
    auto optName = getModule().getName();
    llvm::LLVMContext llvmCtx;
    if (auto llvmModule = mlir::translateModuleToLLVMIR(
            getModule(), llvmCtx, optName ? *optName : "FIRModule")) {
      printer(*llvmModule, output);
      return;
    }

    mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n");
    signalPassFailure();
  }

private:
  llvm::raw_ostream &output;
  fir::LLVMIRLoweringPrinter printer;
};

} // namespace

std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() {
  return std::make_unique<FIRToLLVMLowering>();
}

std::unique_ptr<mlir::Pass>
fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) {
  return std::make_unique<FIRToLLVMLowering>(options);
}

std::unique_ptr<mlir::Pass>
fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output,
                                 fir::LLVMIRLoweringPrinter printer) {
  return std::make_unique<LLVMIRLoweringPass>(output, printer);
}