1 //===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/ 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "flang/Optimizer/CodeGen/CodeGen.h" 14 #include "CGOps.h" 15 #include "PassDetail.h" 16 #include "flang/ISO_Fortran_binding.h" 17 #include "flang/Optimizer/Dialect/FIRAttr.h" 18 #include "flang/Optimizer/Dialect/FIROps.h" 19 #include "flang/Optimizer/Support/InternalNames.h" 20 #include "flang/Optimizer/Support/TypeCode.h" 21 #include "flang/Semantics/runtime-type-info.h" 22 #include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" 23 #include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h" 24 #include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h" 25 #include "mlir/Conversion/LLVMCommon/Pattern.h" 26 #include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h" 27 #include "mlir/IR/BuiltinTypes.h" 28 #include "mlir/IR/Matchers.h" 29 #include "mlir/Pass/Pass.h" 30 #include "mlir/Target/LLVMIR/ModuleTranslation.h" 31 #include "llvm/ADT/ArrayRef.h" 32 33 #define DEBUG_TYPE "flang-codegen" 34 35 // fir::LLVMTypeConverter for converting to LLVM IR dialect types. 36 #include "TypeConverter.h" 37 38 // TODO: This should really be recovered from the specified target. 39 static constexpr unsigned defaultAlign = 8; 40 41 /// `fir.box` attribute values as defined for CFI_attribute_t in 42 /// flang/ISO_Fortran_binding.h. 43 static constexpr unsigned kAttrPointer = CFI_attribute_pointer; 44 static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable; 45 46 static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) { 47 return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8)); 48 } 49 50 static mlir::LLVM::ConstantOp 51 genConstantIndex(mlir::Location loc, mlir::Type ity, 52 mlir::ConversionPatternRewriter &rewriter, 53 std::int64_t offset) { 54 auto cattr = rewriter.getI64IntegerAttr(offset); 55 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 56 } 57 58 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter, 59 mlir::Block *insertBefore) { 60 assert(insertBefore && "expected valid insertion block"); 61 return rewriter.createBlock(insertBefore->getParent(), 62 mlir::Region::iterator(insertBefore)); 63 } 64 65 namespace { 66 /// FIR conversion pattern template 67 template <typename FromOp> 68 class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> { 69 public: 70 explicit FIROpConversion(fir::LLVMTypeConverter &lowering, 71 const fir::FIRToLLVMPassOptions &options) 72 : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {} 73 74 protected: 75 mlir::Type convertType(mlir::Type ty) const { 76 return lowerTy().convertType(ty); 77 } 78 mlir::Type voidPtrTy() const { return getVoidPtrType(); } 79 80 mlir::Type getVoidPtrType() const { 81 return mlir::LLVM::LLVMPointerType::get( 82 mlir::IntegerType::get(&lowerTy().getContext(), 8)); 83 } 84 85 mlir::LLVM::ConstantOp 86 genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 87 int value) const { 88 mlir::Type i32Ty = rewriter.getI32Type(); 89 mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value); 90 return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr); 91 } 92 93 mlir::LLVM::ConstantOp 94 genConstantOffset(mlir::Location loc, 95 mlir::ConversionPatternRewriter &rewriter, 96 int offset) const { 97 mlir::Type ity = lowerTy().offsetType(); 98 mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset); 99 return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr); 100 } 101 102 /// Perform an extension or truncation as needed on an integer value. Lowering 103 /// to the specific target may involve some sign-extending or truncation of 104 /// values, particularly to fit them from abstract box types to the 105 /// appropriate reified structures. 106 mlir::Value integerCast(mlir::Location loc, 107 mlir::ConversionPatternRewriter &rewriter, 108 mlir::Type ty, mlir::Value val) const { 109 auto valTy = val.getType(); 110 // If the value was not yet lowered, lower its type so that it can 111 // be used in getPrimitiveTypeSizeInBits. 112 if (!valTy.isa<mlir::IntegerType>()) 113 valTy = convertType(valTy); 114 auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 115 auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy); 116 if (toSize < fromSize) 117 return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val); 118 if (toSize > fromSize) 119 return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val); 120 return val; 121 } 122 123 /// Construct code sequence to extract the specifc value from a `fir.box`. 124 mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box, 125 mlir::Type resultTy, 126 mlir::ConversionPatternRewriter &rewriter, 127 unsigned boxValue) const { 128 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 129 mlir::LLVM::ConstantOp cValuePos = 130 genConstantOffset(loc, rewriter, boxValue); 131 auto pty = mlir::LLVM::LLVMPointerType::get(resultTy); 132 auto p = rewriter.create<mlir::LLVM::GEPOp>( 133 loc, pty, box, mlir::ValueRange{c0, cValuePos}); 134 return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p); 135 } 136 137 /// Method to construct code sequence to get the triple for dimension `dim` 138 /// from a box. 139 llvm::SmallVector<mlir::Value, 3> 140 getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys, 141 mlir::Value box, mlir::Value dim, 142 mlir::ConversionPatternRewriter &rewriter) const { 143 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 144 mlir::LLVM::ConstantOp cDims = 145 genConstantOffset(loc, rewriter, kDimsPosInBox); 146 mlir::LLVM::LoadOp l0 = 147 loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter); 148 mlir::LLVM::LoadOp l1 = 149 loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter); 150 mlir::LLVM::LoadOp l2 = 151 loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter); 152 return {l0.getResult(), l1.getResult(), l2.getResult()}; 153 } 154 155 mlir::LLVM::LoadOp 156 loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0, 157 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off, 158 mlir::Type ty, 159 mlir::ConversionPatternRewriter &rewriter) const { 160 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 161 mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off); 162 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c); 163 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 164 } 165 166 mlir::Value 167 loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim, 168 mlir::ConversionPatternRewriter &rewriter) const { 169 auto idxTy = lowerTy().indexType(); 170 auto c0 = genConstantOffset(loc, rewriter, 0); 171 auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox); 172 auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim); 173 return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy, 174 rewriter); 175 } 176 177 /// Read base address from a fir.box. Returned address has type ty. 178 mlir::Value 179 loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 180 mlir::ConversionPatternRewriter &rewriter) const { 181 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 182 mlir::LLVM::ConstantOp cAddr = 183 genConstantOffset(loc, rewriter, kAddrPosInBox); 184 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 185 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr); 186 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 187 } 188 189 mlir::Value 190 loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box, 191 mlir::ConversionPatternRewriter &rewriter) const { 192 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 193 mlir::LLVM::ConstantOp cElemLen = 194 genConstantOffset(loc, rewriter, kElemLenPosInBox); 195 auto pty = mlir::LLVM::LLVMPointerType::get(ty); 196 mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen); 197 return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p); 198 } 199 200 // Get the element type given an LLVM type that is of the form 201 // [llvm.ptr](array|struct|vector)+ and the provided indexes. 202 static mlir::Type getBoxEleTy(mlir::Type type, 203 llvm::ArrayRef<unsigned> indexes) { 204 if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>()) 205 type = t.getElementType(); 206 for (auto i : indexes) { 207 if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) { 208 assert(!t.isOpaque() && i < t.getBody().size()); 209 type = t.getBody()[i]; 210 } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 211 type = t.getElementType(); 212 } else if (auto t = type.dyn_cast<mlir::VectorType>()) { 213 type = t.getElementType(); 214 } else { 215 fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()), 216 "request for invalid box element type"); 217 } 218 } 219 return type; 220 } 221 222 // Return LLVM type of the base address given the LLVM type 223 // of the related descriptor (lowered fir.box type). 224 static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) { 225 return getBoxEleTy(type, {kAddrPosInBox}); 226 } 227 228 // Load the attribute from the \p box and perform a check against \p maskValue 229 // The final comparison is implemented as `(attribute & maskValue) != 0`. 230 mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box, 231 mlir::ConversionPatternRewriter &rewriter, 232 unsigned maskValue) const { 233 mlir::Type attrTy = rewriter.getI32Type(); 234 mlir::Value attribute = 235 getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox); 236 mlir::LLVM::ConstantOp attrMask = 237 genConstantOffset(loc, rewriter, maskValue); 238 auto maskRes = 239 rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask); 240 mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0); 241 return rewriter.create<mlir::LLVM::ICmpOp>( 242 loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0); 243 } 244 245 template <typename... ARGS> 246 mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty, 247 mlir::ConversionPatternRewriter &rewriter, 248 mlir::Value base, ARGS... args) const { 249 llvm::SmallVector<mlir::Value> cv = {args...}; 250 return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv); 251 } 252 253 fir::LLVMTypeConverter &lowerTy() const { 254 return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter()); 255 } 256 257 const fir::FIRToLLVMPassOptions &options; 258 }; 259 260 /// FIR conversion pattern template 261 template <typename FromOp> 262 class FIROpAndTypeConversion : public FIROpConversion<FromOp> { 263 public: 264 using FIROpConversion<FromOp>::FIROpConversion; 265 using OpAdaptor = typename FromOp::Adaptor; 266 267 mlir::LogicalResult 268 matchAndRewrite(FromOp op, OpAdaptor adaptor, 269 mlir::ConversionPatternRewriter &rewriter) const final { 270 mlir::Type ty = this->convertType(op.getType()); 271 return doRewrite(op, ty, adaptor, rewriter); 272 } 273 274 virtual mlir::LogicalResult 275 doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor, 276 mlir::ConversionPatternRewriter &rewriter) const = 0; 277 }; 278 } // namespace 279 280 namespace { 281 /// Lower `fir.address_of` operation to `llvm.address_of` operation. 282 struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> { 283 using FIROpConversion::FIROpConversion; 284 285 mlir::LogicalResult 286 matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor, 287 mlir::ConversionPatternRewriter &rewriter) const override { 288 auto ty = convertType(addr.getType()); 289 rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>( 290 addr, ty, addr.getSymbol().getRootReference().getValue()); 291 return mlir::success(); 292 } 293 }; 294 } // namespace 295 296 /// Lookup the function to compute the memory size of this parametric derived 297 /// type. The size of the object may depend on the LEN type parameters of the 298 /// derived type. 299 static mlir::LLVM::LLVMFuncOp 300 getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op, 301 mlir::ConversionPatternRewriter &rewriter) { 302 auto module = op->getParentOfType<mlir::ModuleOp>(); 303 std::string name = recTy.getName().str() + "P.mem.size"; 304 if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name)) 305 return memSizeFunc; 306 TODO(op.getLoc(), "did not find allocation function"); 307 } 308 309 // Compute the alloc scale size (constant factors encoded in the array type). 310 // We do this for arrays without a constant interior or arrays of character with 311 // dynamic length arrays, since those are the only ones that get decayed to a 312 // pointer to the element type. 313 template <typename OP> 314 static mlir::Value 315 genAllocationScaleSize(OP op, mlir::Type ity, 316 mlir::ConversionPatternRewriter &rewriter) { 317 mlir::Location loc = op.getLoc(); 318 mlir::Type dataTy = op.getInType(); 319 mlir::Type scalarType = fir::unwrapSequenceType(dataTy); 320 auto seqTy = dataTy.dyn_cast<fir::SequenceType>(); 321 if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) || 322 (seqTy && fir::characterWithDynamicLen(scalarType))) { 323 fir::SequenceType::Extent constSize = 1; 324 for (auto extent : seqTy.getShape()) 325 if (extent != fir::SequenceType::getUnknownExtent()) 326 constSize *= extent; 327 if (constSize != 1) { 328 mlir::Value constVal{ 329 genConstantIndex(loc, ity, rewriter, constSize).getResult()}; 330 return constVal; 331 } 332 } 333 return nullptr; 334 } 335 336 namespace { 337 /// convert to LLVM IR dialect `alloca` 338 struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> { 339 using FIROpConversion::FIROpConversion; 340 341 mlir::LogicalResult 342 matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor, 343 mlir::ConversionPatternRewriter &rewriter) const override { 344 mlir::ValueRange operands = adaptor.getOperands(); 345 auto loc = alloc.getLoc(); 346 mlir::Type ity = lowerTy().indexType(); 347 unsigned i = 0; 348 mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult(); 349 mlir::Type ty = convertType(alloc.getType()); 350 mlir::Type resultTy = ty; 351 if (alloc.hasLenParams()) { 352 unsigned end = alloc.numLenParams(); 353 llvm::SmallVector<mlir::Value> lenParams; 354 for (; i < end; ++i) 355 lenParams.push_back(operands[i]); 356 mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType()); 357 if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) { 358 fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen( 359 chrTy.getContext(), chrTy.getFKind()); 360 ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy)); 361 assert(end == 1); 362 size = integerCast(loc, rewriter, ity, lenParams[0]); 363 } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) { 364 mlir::LLVM::LLVMFuncOp memSizeFn = 365 getDependentTypeMemSizeFn(recTy, alloc, rewriter); 366 if (!memSizeFn) 367 emitError(loc, "did not find allocation function"); 368 mlir::NamedAttribute attr = rewriter.getNamedAttr( 369 "callee", mlir::SymbolRefAttr::get(memSizeFn)); 370 auto call = rewriter.create<mlir::LLVM::CallOp>( 371 loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr}); 372 size = call.getResult(0); 373 ty = ::getVoidPtrType(alloc.getContext()); 374 } else { 375 return emitError(loc, "unexpected type ") 376 << scalarType << " with type parameters"; 377 } 378 } 379 if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter)) 380 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 381 if (alloc.hasShapeOperands()) { 382 unsigned end = operands.size(); 383 for (; i < end; ++i) 384 size = rewriter.create<mlir::LLVM::MulOp>( 385 loc, ity, size, integerCast(loc, rewriter, ity, operands[i])); 386 } 387 if (ty == resultTy) { 388 // Do not emit the bitcast if ty and resultTy are the same. 389 rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size, 390 alloc->getAttrs()); 391 } else { 392 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size, 393 alloc->getAttrs()); 394 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al); 395 } 396 return mlir::success(); 397 } 398 }; 399 } // namespace 400 401 /// Construct an `llvm.extractvalue` instruction. It will return value at 402 /// element \p x from \p tuple. 403 static mlir::LLVM::ExtractValueOp 404 genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty, 405 mlir::ConversionPatternRewriter &rewriter, 406 mlir::MLIRContext *ctx, int x) { 407 auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x)); 408 auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x]; 409 return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx); 410 } 411 412 namespace { 413 /// Lower `fir.box_addr` to the sequence of operations to extract the first 414 /// element of the box. 415 struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> { 416 using FIROpConversion::FIROpConversion; 417 418 mlir::LogicalResult 419 matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor, 420 mlir::ConversionPatternRewriter &rewriter) const override { 421 mlir::Value a = adaptor.getOperands()[0]; 422 auto loc = boxaddr.getLoc(); 423 mlir::Type ty = convertType(boxaddr.getType()); 424 if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) { 425 rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter)); 426 } else { 427 auto c0attr = rewriter.getI32IntegerAttr(0); 428 auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr); 429 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a, 430 c0); 431 } 432 return mlir::success(); 433 } 434 }; 435 436 /// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the 437 /// boxchar. 438 struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> { 439 using FIROpConversion::FIROpConversion; 440 441 mlir::LogicalResult 442 matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor, 443 mlir::ConversionPatternRewriter &rewriter) const override { 444 mlir::Value boxChar = adaptor.getOperands()[0]; 445 mlir::Location loc = boxChar.getLoc(); 446 mlir::MLIRContext *ctx = boxChar.getContext(); 447 mlir::Type returnValTy = boxCharLen.getResult().getType(); 448 449 constexpr int boxcharLenIdx = 1; 450 mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex( 451 loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx); 452 mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len); 453 rewriter.replaceOp(boxCharLen, lenAfterCast); 454 455 return mlir::success(); 456 } 457 }; 458 459 /// Lower `fir.box_dims` to a sequence of operations to extract the requested 460 /// dimension infomartion from the boxed value. 461 /// Result in a triple set of GEPs and loads. 462 struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> { 463 using FIROpConversion::FIROpConversion; 464 465 mlir::LogicalResult 466 matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor, 467 mlir::ConversionPatternRewriter &rewriter) const override { 468 llvm::SmallVector<mlir::Type, 3> resultTypes = { 469 convertType(boxdims.getResult(0).getType()), 470 convertType(boxdims.getResult(1).getType()), 471 convertType(boxdims.getResult(2).getType()), 472 }; 473 auto results = 474 getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0], 475 adaptor.getOperands()[1], rewriter); 476 rewriter.replaceOp(boxdims, results); 477 return mlir::success(); 478 } 479 }; 480 481 /// Lower `fir.box_elesize` to a sequence of operations ro extract the size of 482 /// an element in the boxed value. 483 struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> { 484 using FIROpConversion::FIROpConversion; 485 486 mlir::LogicalResult 487 matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor, 488 mlir::ConversionPatternRewriter &rewriter) const override { 489 mlir::Value a = adaptor.getOperands()[0]; 490 auto loc = boxelesz.getLoc(); 491 auto ty = convertType(boxelesz.getType()); 492 auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox); 493 rewriter.replaceOp(boxelesz, elemSize); 494 return mlir::success(); 495 } 496 }; 497 498 /// Lower `fir.box_isalloc` to a sequence of operations to determine if the 499 /// boxed value was from an ALLOCATABLE entity. 500 struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> { 501 using FIROpConversion::FIROpConversion; 502 503 mlir::LogicalResult 504 matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor, 505 mlir::ConversionPatternRewriter &rewriter) const override { 506 mlir::Value box = adaptor.getOperands()[0]; 507 auto loc = boxisalloc.getLoc(); 508 mlir::Value check = 509 genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable); 510 rewriter.replaceOp(boxisalloc, check); 511 return mlir::success(); 512 } 513 }; 514 515 /// Lower `fir.box_isarray` to a sequence of operations to determine if the 516 /// boxed is an array. 517 struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> { 518 using FIROpConversion::FIROpConversion; 519 520 mlir::LogicalResult 521 matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor, 522 mlir::ConversionPatternRewriter &rewriter) const override { 523 mlir::Value a = adaptor.getOperands()[0]; 524 auto loc = boxisarray.getLoc(); 525 auto rank = 526 getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox); 527 auto c0 = genConstantOffset(loc, rewriter, 0); 528 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 529 boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0); 530 return mlir::success(); 531 } 532 }; 533 534 /// Lower `fir.box_isptr` to a sequence of operations to determined if the 535 /// boxed value was from a POINTER entity. 536 struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> { 537 using FIROpConversion::FIROpConversion; 538 539 mlir::LogicalResult 540 matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor, 541 mlir::ConversionPatternRewriter &rewriter) const override { 542 mlir::Value box = adaptor.getOperands()[0]; 543 auto loc = boxisptr.getLoc(); 544 mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer); 545 rewriter.replaceOp(boxisptr, check); 546 return mlir::success(); 547 } 548 }; 549 550 /// Lower `fir.box_rank` to the sequence of operation to extract the rank from 551 /// the box. 552 struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> { 553 using FIROpConversion::FIROpConversion; 554 555 mlir::LogicalResult 556 matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor, 557 mlir::ConversionPatternRewriter &rewriter) const override { 558 mlir::Value a = adaptor.getOperands()[0]; 559 auto loc = boxrank.getLoc(); 560 mlir::Type ty = convertType(boxrank.getType()); 561 auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox); 562 rewriter.replaceOp(boxrank, result); 563 return mlir::success(); 564 } 565 }; 566 567 /// Lower `fir.boxproc_host` operation. Extracts the host pointer from the 568 /// boxproc. 569 /// TODO: Part of supporting Fortran 2003 procedure pointers. 570 struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> { 571 using FIROpConversion::FIROpConversion; 572 573 mlir::LogicalResult 574 matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor, 575 mlir::ConversionPatternRewriter &rewriter) const override { 576 TODO(boxprochost.getLoc(), "fir.boxproc_host codegen"); 577 return mlir::failure(); 578 } 579 }; 580 581 /// Lower `fir.box_tdesc` to the sequence of operations to extract the type 582 /// descriptor from the box. 583 struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> { 584 using FIROpConversion::FIROpConversion; 585 586 mlir::LogicalResult 587 matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor, 588 mlir::ConversionPatternRewriter &rewriter) const override { 589 mlir::Value box = adaptor.getOperands()[0]; 590 auto loc = boxtypedesc.getLoc(); 591 mlir::Type typeTy = 592 fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext()); 593 auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox); 594 auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy); 595 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy, 596 result); 597 return mlir::success(); 598 } 599 }; 600 601 /// Lower `fir.string_lit` to LLVM IR dialect operation. 602 struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> { 603 using FIROpConversion::FIROpConversion; 604 605 mlir::LogicalResult 606 matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor, 607 mlir::ConversionPatternRewriter &rewriter) const override { 608 auto ty = convertType(constop.getType()); 609 auto attr = constop.getValue(); 610 if (attr.isa<mlir::StringAttr>()) { 611 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr); 612 return mlir::success(); 613 } 614 615 auto charTy = constop.getType().cast<fir::CharacterType>(); 616 unsigned bits = lowerTy().characterBitsize(charTy); 617 mlir::Type intTy = rewriter.getIntegerType(bits); 618 mlir::Location loc = constop.getLoc(); 619 mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 620 if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) { 621 cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr); 622 } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) { 623 for (auto a : llvm::enumerate(arr.getValue())) { 624 // convert each character to a precise bitsize 625 auto elemAttr = mlir::IntegerAttr::get( 626 intTy, 627 a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits)); 628 auto elemCst = 629 rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr); 630 auto index = mlir::ArrayAttr::get( 631 constop.getContext(), rewriter.getI32IntegerAttr(a.index())); 632 cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst, 633 index); 634 } 635 } else { 636 return mlir::failure(); 637 } 638 rewriter.replaceOp(constop, cst); 639 return mlir::success(); 640 } 641 }; 642 643 /// `fir.call` -> `llvm.call` 644 struct CallOpConversion : public FIROpConversion<fir::CallOp> { 645 using FIROpConversion::FIROpConversion; 646 647 mlir::LogicalResult 648 matchAndRewrite(fir::CallOp call, OpAdaptor adaptor, 649 mlir::ConversionPatternRewriter &rewriter) const override { 650 llvm::SmallVector<mlir::Type> resultTys; 651 for (auto r : call.getResults()) 652 resultTys.push_back(convertType(r.getType())); 653 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 654 call, resultTys, adaptor.getOperands(), call->getAttrs()); 655 return mlir::success(); 656 } 657 }; 658 } // namespace 659 660 static mlir::Type getComplexEleTy(mlir::Type complex) { 661 if (auto cc = complex.dyn_cast<mlir::ComplexType>()) 662 return cc.getElementType(); 663 return complex.cast<fir::ComplexType>().getElementType(); 664 } 665 666 namespace { 667 /// Compare complex values 668 /// 669 /// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une). 670 /// 671 /// For completeness, all other comparison are done on the real component only. 672 struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> { 673 using FIROpConversion::FIROpConversion; 674 675 mlir::LogicalResult 676 matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor, 677 mlir::ConversionPatternRewriter &rewriter) const override { 678 mlir::ValueRange operands = adaptor.getOperands(); 679 mlir::MLIRContext *ctxt = cmp.getContext(); 680 mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType())); 681 mlir::Type resTy = convertType(cmp.getType()); 682 mlir::Location loc = cmp.getLoc(); 683 auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 684 llvm::SmallVector<mlir::Value, 2> rp = { 685 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 686 pos0), 687 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 688 pos0)}; 689 auto rcp = 690 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs()); 691 auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 692 llvm::SmallVector<mlir::Value, 2> ip = { 693 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0], 694 pos1), 695 rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1], 696 pos1)}; 697 auto icp = 698 rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs()); 699 llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp}; 700 switch (cmp.getPredicate()) { 701 case mlir::arith::CmpFPredicate::OEQ: // .EQ. 702 rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp); 703 break; 704 case mlir::arith::CmpFPredicate::UNE: // .NE. 705 rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp); 706 break; 707 default: 708 rewriter.replaceOp(cmp, rcp.getResult()); 709 break; 710 } 711 return mlir::success(); 712 } 713 }; 714 715 /// Lower complex constants 716 struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> { 717 using FIROpConversion::FIROpConversion; 718 719 mlir::LogicalResult 720 matchAndRewrite(fir::ConstcOp conc, OpAdaptor, 721 mlir::ConversionPatternRewriter &rewriter) const override { 722 mlir::Location loc = conc.getLoc(); 723 mlir::MLIRContext *ctx = conc.getContext(); 724 mlir::Type ty = convertType(conc.getType()); 725 mlir::Type ety = convertType(getComplexEleTy(conc.getType())); 726 auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal())); 727 auto realPart = 728 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr); 729 auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary())); 730 auto imPart = 731 rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr); 732 auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 733 auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 734 auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 735 auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>( 736 loc, ty, undef, realPart, realIndex); 737 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal, 738 imPart, imIndex); 739 return mlir::success(); 740 } 741 742 inline llvm::APFloat getValue(mlir::Attribute attr) const { 743 return attr.cast<fir::RealAttr>().getValue(); 744 } 745 }; 746 747 /// convert value of from-type to value of to-type 748 struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> { 749 using FIROpConversion::FIROpConversion; 750 751 static bool isFloatingPointTy(mlir::Type ty) { 752 return ty.isa<mlir::FloatType>(); 753 } 754 755 mlir::LogicalResult 756 matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor, 757 mlir::ConversionPatternRewriter &rewriter) const override { 758 auto fromFirTy = convert.getValue().getType(); 759 auto toFirTy = convert.getRes().getType(); 760 auto fromTy = convertType(fromFirTy); 761 auto toTy = convertType(toFirTy); 762 mlir::Value op0 = adaptor.getOperands()[0]; 763 if (fromTy == toTy) { 764 rewriter.replaceOp(convert, op0); 765 return mlir::success(); 766 } 767 auto loc = convert.getLoc(); 768 auto convertFpToFp = [&](mlir::Value val, unsigned fromBits, 769 unsigned toBits, mlir::Type toTy) -> mlir::Value { 770 if (fromBits == toBits) { 771 // TODO: Converting between two floating-point representations with the 772 // same bitwidth is not allowed for now. 773 mlir::emitError(loc, 774 "cannot implicitly convert between two floating-point " 775 "representations of the same bitwidth"); 776 return {}; 777 } 778 if (fromBits > toBits) 779 return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val); 780 return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val); 781 }; 782 // Complex to complex conversion. 783 if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) { 784 // Special case: handle the conversion of a complex such that both the 785 // real and imaginary parts are converted together. 786 auto zero = mlir::ArrayAttr::get(convert.getContext(), 787 rewriter.getI32IntegerAttr(0)); 788 auto one = mlir::ArrayAttr::get(convert.getContext(), 789 rewriter.getI32IntegerAttr(1)); 790 auto ty = convertType(getComplexEleTy(convert.getValue().getType())); 791 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero); 792 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one); 793 auto nt = convertType(getComplexEleTy(convert.getRes().getType())); 794 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty); 795 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt); 796 auto rc = convertFpToFp(rp, fromBits, toBits, nt); 797 auto ic = convertFpToFp(ip, fromBits, toBits, nt); 798 auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy); 799 auto i1 = 800 rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero); 801 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1, 802 ic, one); 803 return mlir::success(); 804 } 805 806 // Follow UNIX F77 convention for logicals: 807 // 1. underlying integer is not zero => logical is .TRUE. 808 // 2. logical is .TRUE. => set underlying integer to 1. 809 auto i1Type = mlir::IntegerType::get(convert.getContext(), 1); 810 if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) { 811 mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0); 812 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 813 convert, mlir::LLVM::ICmpPredicate::ne, op0, zero); 814 return mlir::success(); 815 } 816 if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) { 817 rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0); 818 return mlir::success(); 819 } 820 821 // Floating point to floating point conversion. 822 if (isFloatingPointTy(fromTy)) { 823 if (isFloatingPointTy(toTy)) { 824 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 825 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 826 auto v = convertFpToFp(op0, fromBits, toBits, toTy); 827 rewriter.replaceOp(convert, v); 828 return mlir::success(); 829 } 830 if (toTy.isa<mlir::IntegerType>()) { 831 rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0); 832 return mlir::success(); 833 } 834 } else if (fromTy.isa<mlir::IntegerType>()) { 835 // Integer to integer conversion. 836 if (toTy.isa<mlir::IntegerType>()) { 837 auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy); 838 auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy); 839 assert(fromBits != toBits); 840 if (fromBits > toBits) { 841 rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0); 842 return mlir::success(); 843 } 844 rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0); 845 return mlir::success(); 846 } 847 // Integer to floating point conversion. 848 if (isFloatingPointTy(toTy)) { 849 rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0); 850 return mlir::success(); 851 } 852 // Integer to pointer conversion. 853 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 854 rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0); 855 return mlir::success(); 856 } 857 } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) { 858 // Pointer to integer conversion. 859 if (toTy.isa<mlir::IntegerType>()) { 860 rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0); 861 return mlir::success(); 862 } 863 // Pointer to pointer conversion. 864 if (toTy.isa<mlir::LLVM::LLVMPointerType>()) { 865 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0); 866 return mlir::success(); 867 } 868 } 869 return emitError(loc) << "cannot convert " << fromTy << " to " << toTy; 870 } 871 }; 872 873 /// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch 874 /// table. 875 struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> { 876 using FIROpConversion::FIROpConversion; 877 878 mlir::LogicalResult 879 matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor, 880 mlir::ConversionPatternRewriter &rewriter) const override { 881 TODO(dispatch.getLoc(), "fir.dispatch codegen"); 882 return mlir::failure(); 883 } 884 }; 885 886 /// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran 887 /// derived type. 888 struct DispatchTableOpConversion 889 : public FIROpConversion<fir::DispatchTableOp> { 890 using FIROpConversion::FIROpConversion; 891 892 mlir::LogicalResult 893 matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor, 894 mlir::ConversionPatternRewriter &rewriter) const override { 895 TODO(dispTab.getLoc(), "fir.dispatch_table codegen"); 896 return mlir::failure(); 897 } 898 }; 899 900 /// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a 901 /// method-name to a function. 902 struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> { 903 using FIROpConversion::FIROpConversion; 904 905 mlir::LogicalResult 906 matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor, 907 mlir::ConversionPatternRewriter &rewriter) const override { 908 TODO(dtEnt.getLoc(), "fir.dt_entry codegen"); 909 return mlir::failure(); 910 } 911 }; 912 913 /// Lower `fir.global_len` operation. 914 struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> { 915 using FIROpConversion::FIROpConversion; 916 917 mlir::LogicalResult 918 matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor, 919 mlir::ConversionPatternRewriter &rewriter) const override { 920 TODO(globalLen.getLoc(), "fir.global_len codegen"); 921 return mlir::failure(); 922 } 923 }; 924 925 /// Lower fir.len_param_index 926 struct LenParamIndexOpConversion 927 : public FIROpConversion<fir::LenParamIndexOp> { 928 using FIROpConversion::FIROpConversion; 929 930 // FIXME: this should be specialized by the runtime target 931 mlir::LogicalResult 932 matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor, 933 mlir::ConversionPatternRewriter &rewriter) const override { 934 TODO(lenp.getLoc(), "fir.len_param_index codegen"); 935 } 936 }; 937 938 /// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of 939 /// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element 940 /// in this struct is a pointer. Its type is determined from `KIND`. The 2nd 941 /// element is the length of the character buffer (`#n`). 942 struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> { 943 using FIROpConversion::FIROpConversion; 944 945 mlir::LogicalResult 946 matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor, 947 mlir::ConversionPatternRewriter &rewriter) const override { 948 mlir::ValueRange operands = adaptor.getOperands(); 949 auto *ctx = emboxChar.getContext(); 950 951 mlir::Value charBuffer = operands[0]; 952 mlir::Value charBufferLen = operands[1]; 953 954 mlir::Location loc = emboxChar.getLoc(); 955 mlir::Type llvmStructTy = convertType(emboxChar.getType()); 956 auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy); 957 958 mlir::Type lenTy = 959 llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1]; 960 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen); 961 962 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 963 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 964 auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>( 965 loc, llvmStructTy, llvmStruct, charBuffer, c0); 966 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 967 emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1); 968 969 return mlir::success(); 970 } 971 }; 972 } // namespace 973 974 /// Return the LLVMFuncOp corresponding to the standard malloc call. 975 static mlir::LLVM::LLVMFuncOp 976 getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) { 977 auto module = op->getParentOfType<mlir::ModuleOp>(); 978 if (mlir::LLVM::LLVMFuncOp mallocFunc = 979 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc")) 980 return mallocFunc; 981 mlir::OpBuilder moduleBuilder( 982 op->getParentOfType<mlir::ModuleOp>().getBodyRegion()); 983 auto indexType = mlir::IntegerType::get(op.getContext(), 64); 984 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 985 rewriter.getUnknownLoc(), "malloc", 986 mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()), 987 indexType, 988 /*isVarArg=*/false)); 989 } 990 991 /// Helper function for generating the LLVM IR that computes the size 992 /// in bytes for a derived type. 993 static mlir::Value 994 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy, 995 mlir::ConversionPatternRewriter &rewriter) { 996 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 997 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 998 llvm::SmallVector<mlir::Value> args = {one}; 999 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args); 1000 return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep); 1001 } 1002 1003 namespace { 1004 /// Lower a `fir.allocmem` instruction into `llvm.call @malloc` 1005 struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> { 1006 using FIROpConversion::FIROpConversion; 1007 1008 mlir::LogicalResult 1009 matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor, 1010 mlir::ConversionPatternRewriter &rewriter) const override { 1011 mlir::Type heapTy = heap.getType(); 1012 mlir::Type ty = convertType(heapTy); 1013 mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter); 1014 mlir::Location loc = heap.getLoc(); 1015 auto ity = lowerTy().indexType(); 1016 mlir::Type dataTy = fir::unwrapRefType(heapTy); 1017 if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy))) 1018 TODO(loc, "fir.allocmem codegen of derived type with length parameters"); 1019 mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty); 1020 if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter)) 1021 size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize); 1022 for (mlir::Value opnd : adaptor.getOperands()) 1023 size = rewriter.create<mlir::LLVM::MulOp>( 1024 loc, ity, size, integerCast(loc, rewriter, ity, opnd)); 1025 heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc)); 1026 auto malloc = rewriter.create<mlir::LLVM::CallOp>( 1027 loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs()); 1028 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty, 1029 malloc.getResult(0)); 1030 return mlir::success(); 1031 } 1032 1033 // Compute the (allocation) size of the allocmem type in bytes. 1034 mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy, 1035 mlir::ConversionPatternRewriter &rewriter, 1036 mlir::Type llTy) const { 1037 // Use the primitive size, if available. 1038 auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1039 if (auto size = 1040 mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType())) 1041 return genConstantIndex(loc, idxTy, rewriter, size / 8); 1042 1043 // Otherwise, generate the GEP trick in LLVM IR to compute the size. 1044 return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter); 1045 } 1046 }; 1047 } // namespace 1048 1049 /// Return the LLVMFuncOp corresponding to the standard free call. 1050 static mlir::LLVM::LLVMFuncOp 1051 getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) { 1052 auto module = op->getParentOfType<mlir::ModuleOp>(); 1053 if (mlir::LLVM::LLVMFuncOp freeFunc = 1054 module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free")) 1055 return freeFunc; 1056 mlir::OpBuilder moduleBuilder(module.getBodyRegion()); 1057 auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext()); 1058 return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>( 1059 rewriter.getUnknownLoc(), "free", 1060 mlir::LLVM::LLVMFunctionType::get(voidType, 1061 getVoidPtrType(op.getContext()), 1062 /*isVarArg=*/false)); 1063 } 1064 1065 namespace { 1066 /// Lower a `fir.freemem` instruction into `llvm.call @free` 1067 struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> { 1068 using FIROpConversion::FIROpConversion; 1069 1070 mlir::LogicalResult 1071 matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor, 1072 mlir::ConversionPatternRewriter &rewriter) const override { 1073 mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter); 1074 mlir::Location loc = freemem.getLoc(); 1075 auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>( 1076 freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]); 1077 freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc)); 1078 rewriter.create<mlir::LLVM::CallOp>( 1079 loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs()); 1080 rewriter.eraseOp(freemem); 1081 return mlir::success(); 1082 } 1083 }; 1084 } // namespace 1085 1086 /// Common base class for embox to descriptor conversion. 1087 template <typename OP> 1088 struct EmboxCommonConversion : public FIROpConversion<OP> { 1089 using FIROpConversion<OP>::FIROpConversion; 1090 1091 // Find the LLVMFuncOp in whose entry block the alloca should be inserted. 1092 // The order to find the LLVMFuncOp is as follows: 1093 // 1. The parent operation of the current block if it is a LLVMFuncOp. 1094 // 2. The first ancestor that is a LLVMFuncOp. 1095 mlir::LLVM::LLVMFuncOp 1096 getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const { 1097 mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp(); 1098 return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp) 1099 ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp) 1100 : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>(); 1101 } 1102 1103 // Generate an alloca of size 1 and type \p toTy. 1104 mlir::LLVM::AllocaOp 1105 genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment, 1106 mlir::ConversionPatternRewriter &rewriter) const { 1107 auto thisPt = rewriter.saveInsertionPoint(); 1108 mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter); 1109 rewriter.setInsertionPointToStart(&func.front()); 1110 auto size = this->genI32Constant(loc, rewriter, 1); 1111 auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment); 1112 rewriter.restoreInsertionPoint(thisPt); 1113 return al; 1114 } 1115 1116 static int getCFIAttr(fir::BoxType boxTy) { 1117 auto eleTy = boxTy.getEleTy(); 1118 if (eleTy.isa<fir::PointerType>()) 1119 return CFI_attribute_pointer; 1120 if (eleTy.isa<fir::HeapType>()) 1121 return CFI_attribute_allocatable; 1122 return CFI_attribute_other; 1123 } 1124 1125 static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) { 1126 return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy)) 1127 .template dyn_cast<fir::RecordType>(); 1128 } 1129 static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) { 1130 auto recTy = unwrapIfDerived(boxTy); 1131 return recTy && recTy.getNumLenParams() > 0; 1132 } 1133 static bool isDerivedType(fir::BoxType boxTy) { 1134 return static_cast<bool>(unwrapIfDerived(boxTy)); 1135 } 1136 1137 // Get the element size and CFI type code of the boxed value. 1138 std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode( 1139 mlir::Location loc, mlir::ConversionPatternRewriter &rewriter, 1140 mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const { 1141 auto doInteger = 1142 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1143 int typeCode = fir::integerBitsToTypeCode(width); 1144 return {this->genConstantOffset(loc, rewriter, width / 8), 1145 this->genConstantOffset(loc, rewriter, typeCode)}; 1146 }; 1147 auto doLogical = 1148 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1149 int typeCode = fir::logicalBitsToTypeCode(width); 1150 return {this->genConstantOffset(loc, rewriter, width / 8), 1151 this->genConstantOffset(loc, rewriter, typeCode)}; 1152 }; 1153 auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1154 int typeCode = fir::realBitsToTypeCode(width); 1155 return {this->genConstantOffset(loc, rewriter, width / 8), 1156 this->genConstantOffset(loc, rewriter, typeCode)}; 1157 }; 1158 auto doComplex = 1159 [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> { 1160 auto typeCode = fir::complexBitsToTypeCode(width); 1161 return {this->genConstantOffset(loc, rewriter, width / 8 * 2), 1162 this->genConstantOffset(loc, rewriter, typeCode)}; 1163 }; 1164 auto doCharacter = 1165 [&](unsigned width, 1166 mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> { 1167 auto typeCode = fir::characterBitsToTypeCode(width); 1168 auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode); 1169 if (width == 8) 1170 return {len, typeCodeVal}; 1171 auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64); 1172 auto byteWidth = genConstantIndex(loc, i64Ty, rewriter, width / 8); 1173 auto len64 = FIROpConversion<OP>::integerCast(loc, rewriter, i64Ty, len); 1174 auto size = 1175 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len64); 1176 return {size, typeCodeVal}; 1177 }; 1178 auto getKindMap = [&]() -> fir::KindMapping & { 1179 return this->lowerTy().getKindMap(); 1180 }; 1181 // Pointer-like types. 1182 if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy)) 1183 boxEleTy = eleTy; 1184 // Integer types. 1185 if (fir::isa_integer(boxEleTy)) { 1186 if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>()) 1187 return doInteger(ty.getWidth()); 1188 auto ty = boxEleTy.cast<fir::IntegerType>(); 1189 return doInteger(getKindMap().getIntegerBitsize(ty.getFKind())); 1190 } 1191 // Floating point types. 1192 if (fir::isa_real(boxEleTy)) { 1193 if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>()) 1194 return doFloat(ty.getWidth()); 1195 auto ty = boxEleTy.cast<fir::RealType>(); 1196 return doFloat(getKindMap().getRealBitsize(ty.getFKind())); 1197 } 1198 // Complex types. 1199 if (fir::isa_complex(boxEleTy)) { 1200 if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>()) 1201 return doComplex( 1202 ty.getElementType().cast<mlir::FloatType>().getWidth()); 1203 auto ty = boxEleTy.cast<fir::ComplexType>(); 1204 return doComplex(getKindMap().getRealBitsize(ty.getFKind())); 1205 } 1206 // Character types. 1207 if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) { 1208 auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind()); 1209 if (ty.getLen() != fir::CharacterType::unknownLen()) { 1210 auto len = this->genConstantOffset(loc, rewriter, ty.getLen()); 1211 return doCharacter(charWidth, len); 1212 } 1213 assert(!lenParams.empty()); 1214 return doCharacter(charWidth, lenParams.back()); 1215 } 1216 // Logical type. 1217 if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>()) 1218 return doLogical(getKindMap().getLogicalBitsize(ty.getFKind())); 1219 // Array types. 1220 if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>()) 1221 return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams); 1222 // Derived-type types. 1223 if (boxEleTy.isa<fir::RecordType>()) { 1224 auto ptrTy = mlir::LLVM::LLVMPointerType::get( 1225 this->lowerTy().convertType(boxEleTy)); 1226 auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy); 1227 auto one = 1228 genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1); 1229 auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, 1230 mlir::ValueRange{one}); 1231 auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>( 1232 loc, this->lowerTy().indexType(), gep); 1233 return {eleSize, 1234 this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())}; 1235 } 1236 // Reference type. 1237 if (fir::isa_ref_type(boxEleTy)) { 1238 // FIXME: use the target pointer size rather than sizeof(void*) 1239 return {this->genConstantOffset(loc, rewriter, sizeof(void *)), 1240 this->genConstantOffset(loc, rewriter, CFI_type_cptr)}; 1241 } 1242 fir::emitFatalError(loc, "unhandled type in fir.box code generation"); 1243 } 1244 1245 /// Basic pattern to write a field in the descriptor 1246 mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter, 1247 mlir::Location loc, mlir::Value dest, 1248 llvm::ArrayRef<unsigned> fldIndexes, 1249 mlir::Value value, bool bitcast = false) const { 1250 auto boxTy = dest.getType(); 1251 auto fldTy = this->getBoxEleTy(boxTy, fldIndexes); 1252 if (bitcast) 1253 value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value); 1254 else 1255 value = this->integerCast(loc, rewriter, fldTy, value); 1256 llvm::SmallVector<mlir::Attribute, 2> attrs; 1257 for (auto i : fldIndexes) 1258 attrs.push_back(rewriter.getI32IntegerAttr(i)); 1259 auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs); 1260 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value, 1261 indexesAttr); 1262 } 1263 1264 inline mlir::Value 1265 insertBaseAddress(mlir::ConversionPatternRewriter &rewriter, 1266 mlir::Location loc, mlir::Value dest, 1267 mlir::Value base) const { 1268 return insertField(rewriter, loc, dest, {kAddrPosInBox}, base, 1269 /*bitCast=*/true); 1270 } 1271 1272 inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter, 1273 mlir::Location loc, mlir::Value dest, 1274 unsigned dim, mlir::Value lb) const { 1275 return insertField(rewriter, loc, dest, 1276 {kDimsPosInBox, dim, kDimLowerBoundPos}, lb); 1277 } 1278 1279 inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter, 1280 mlir::Location loc, mlir::Value dest, 1281 unsigned dim, mlir::Value extent) const { 1282 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos}, 1283 extent); 1284 } 1285 1286 inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter, 1287 mlir::Location loc, mlir::Value dest, 1288 unsigned dim, mlir::Value stride) const { 1289 return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos}, 1290 stride); 1291 } 1292 1293 /// Get the address of the type descriptor global variable that was created by 1294 /// lowering for derived type \p recType. 1295 template <typename BOX> 1296 mlir::Value 1297 getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter, 1298 mlir::Location loc, fir::RecordType recType) const { 1299 std::string name = 1300 fir::NameUniquer::getTypeDescriptorName(recType.getName()); 1301 auto module = box->template getParentOfType<mlir::ModuleOp>(); 1302 if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) { 1303 auto ty = mlir::LLVM::LLVMPointerType::get( 1304 this->lowerTy().convertType(global.getType())); 1305 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1306 global.getSymName()); 1307 } 1308 if (auto global = 1309 module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) { 1310 // The global may have already been translated to LLVM. 1311 auto ty = mlir::LLVM::LLVMPointerType::get(global.getType()); 1312 return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty, 1313 global.getSymName()); 1314 } 1315 // Type info derived types do not have type descriptors since they are the 1316 // types defining type descriptors. 1317 if (!this->options.ignoreMissingTypeDescriptors && 1318 !fir::NameUniquer::belongsToModule( 1319 name, Fortran::semantics::typeInfoBuiltinModule)) 1320 fir::emitFatalError( 1321 loc, "runtime derived type info descriptor was not generated"); 1322 return rewriter.create<mlir::LLVM::NullOp>( 1323 loc, ::getVoidPtrType(box.getContext())); 1324 } 1325 1326 template <typename BOX> 1327 std::tuple<fir::BoxType, mlir::Value, mlir::Value> 1328 consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter, 1329 unsigned rank, mlir::ValueRange lenParams) const { 1330 auto loc = box.getLoc(); 1331 auto boxTy = box.getType().template dyn_cast<fir::BoxType>(); 1332 auto convTy = this->lowerTy().convertBoxType(boxTy, rank); 1333 auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>(); 1334 auto llvmBoxTy = llvmBoxPtrTy.getElementType(); 1335 mlir::Value descriptor = 1336 rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy); 1337 1338 llvm::SmallVector<mlir::Value> typeparams = lenParams; 1339 if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) { 1340 if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy())) 1341 typeparams.push_back(box.substr()[1]); 1342 } 1343 1344 // Write each of the fields with the appropriate values 1345 auto [eleSize, cfiTy] = 1346 getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams); 1347 descriptor = 1348 insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize); 1349 descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox}, 1350 this->genI32Constant(loc, rewriter, CFI_VERSION)); 1351 descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox}, 1352 this->genI32Constant(loc, rewriter, rank)); 1353 descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy); 1354 descriptor = 1355 insertField(rewriter, loc, descriptor, {kAttributePosInBox}, 1356 this->genI32Constant(loc, rewriter, getCFIAttr(boxTy))); 1357 const bool hasAddendum = isDerivedType(boxTy); 1358 descriptor = 1359 insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox}, 1360 this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0)); 1361 1362 if (hasAddendum) { 1363 auto isArray = 1364 fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>(); 1365 unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox; 1366 auto typeDesc = 1367 getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy)); 1368 descriptor = 1369 insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc, 1370 /*bitCast=*/true); 1371 } 1372 1373 return {boxTy, descriptor, eleSize}; 1374 } 1375 1376 /// Compute the base address of a substring given the base address of a scalar 1377 /// string and the zero based string lower bound. 1378 mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter, 1379 mlir::Location loc, mlir::Value base, 1380 mlir::Value lowerBound) const { 1381 llvm::SmallVector<mlir::Value> gepOperands; 1382 auto baseType = 1383 base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType(); 1384 if (baseType.isa<mlir::LLVM::LLVMArrayType>()) { 1385 auto idxTy = this->lowerTy().indexType(); 1386 gepOperands.push_back(genConstantIndex(loc, idxTy, rewriter, 0)); 1387 gepOperands.push_back(lowerBound); 1388 } else { 1389 gepOperands.push_back(lowerBound); 1390 } 1391 return this->genGEP(loc, base.getType(), rewriter, base, gepOperands); 1392 } 1393 1394 /// If the embox is not in a globalOp body, allocate storage for the box; 1395 /// store the value inside and return the generated alloca. Return the input 1396 /// value otherwise. 1397 mlir::Value 1398 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1399 mlir::Location loc, mlir::Value boxValue) const { 1400 auto *thisBlock = rewriter.getInsertionBlock(); 1401 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1402 return boxValue; 1403 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1404 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1405 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1406 return alloca; 1407 } 1408 }; 1409 1410 /// Compute the extent of a triplet slice (lb:ub:step). 1411 static mlir::Value 1412 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1413 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1414 mlir::Value step, mlir::Value zero, mlir::Type type) { 1415 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1416 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1417 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1418 // If the resulting extent is negative (`ub-lb` and `step` have different 1419 // signs), zero must be returned instead. 1420 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1421 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1422 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1423 } 1424 1425 /// Create a generic box on a memory reference. This conversions lowers the 1426 /// abstract box to the appropriate, initialized descriptor. 1427 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1428 using EmboxCommonConversion::EmboxCommonConversion; 1429 1430 mlir::LogicalResult 1431 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1432 mlir::ConversionPatternRewriter &rewriter) const override { 1433 assert(!embox.getShape() && "There should be no dims on this embox op"); 1434 auto [boxTy, dest, eleSize] = 1435 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1436 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1437 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1438 adaptor.getOperands()[0]); 1439 if (isDerivedTypeWithLenParams(boxTy)) { 1440 TODO(embox.getLoc(), 1441 "fir.embox codegen of derived with length parameters"); 1442 return mlir::failure(); 1443 } 1444 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1445 rewriter.replaceOp(embox, result); 1446 return mlir::success(); 1447 } 1448 }; 1449 1450 /// Create a generic box on a memory reference. 1451 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1452 using EmboxCommonConversion::EmboxCommonConversion; 1453 1454 mlir::LogicalResult 1455 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1456 mlir::ConversionPatternRewriter &rewriter) const override { 1457 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1458 xbox, rewriter, xbox.getOutRank(), 1459 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1460 // Generate the triples in the dims field of the descriptor 1461 mlir::ValueRange operands = adaptor.getOperands(); 1462 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1463 mlir::Value base = operands[0]; 1464 assert(!xbox.shape().empty() && "must have a shape"); 1465 unsigned shapeOffset = xbox.shapeOffset(); 1466 bool hasShift = !xbox.shift().empty(); 1467 unsigned shiftOffset = xbox.shiftOffset(); 1468 bool hasSlice = !xbox.slice().empty(); 1469 unsigned sliceOffset = xbox.sliceOffset(); 1470 mlir::Location loc = xbox.getLoc(); 1471 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1472 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1473 mlir::Value prevPtrOff = one; 1474 mlir::Type eleTy = boxTy.getEleTy(); 1475 const unsigned rank = xbox.getRank(); 1476 llvm::SmallVector<mlir::Value> gepArgs; 1477 unsigned constRows = 0; 1478 mlir::Value ptrOffset = zero; 1479 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1480 assert(memEleTy.isa<fir::SequenceType>()); 1481 auto seqTy = memEleTy.cast<fir::SequenceType>(); 1482 mlir::Type seqEleTy = seqTy.getEleTy(); 1483 // Adjust the element scaling factor if the element is a dependent type. 1484 if (fir::hasDynamicSize(seqEleTy)) { 1485 if (auto charTy = seqEleTy.dyn_cast<fir::CharacterType>()) { 1486 assert(xbox.lenParams().size() == 1); 1487 mlir::LLVM::ConstantOp charSize = genConstantIndex( 1488 loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); 1489 mlir::Value castedLen = 1490 integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); 1491 auto byteOffset = 1492 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, charSize, castedLen); 1493 prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); 1494 } else if (seqEleTy.isa<fir::RecordType>()) { 1495 // prevPtrOff = ; 1496 TODO(loc, "generate call to calculate size of PDT"); 1497 } else { 1498 fir::emitFatalError(loc, "unexpected dynamic type"); 1499 } 1500 } else { 1501 constRows = seqTy.getConstantRows(); 1502 } 1503 1504 const auto hasSubcomp = !xbox.subcomponent().empty(); 1505 const bool hasSubstr = !xbox.substr().empty(); 1506 /// Compute initial element stride that will be use to compute the step in 1507 /// each dimension. 1508 mlir::Value prevDimByteStride = integerCast(loc, rewriter, i64Ty, eleSize); 1509 if (hasSubcomp) { 1510 // We have a subcomponent. The step value needs to be the number of 1511 // bytes per element (which is a derived type). 1512 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1513 prevDimByteStride = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1514 } else if (hasSubstr) { 1515 // We have a substring. The step value needs to be the number of bytes 1516 // per CHARACTER element. 1517 auto charTy = seqEleTy.cast<fir::CharacterType>(); 1518 if (fir::hasDynamicSize(charTy)) { 1519 prevDimByteStride = prevPtrOff; 1520 } else { 1521 prevDimByteStride = genConstantIndex( 1522 loc, i64Ty, rewriter, 1523 charTy.getLen() * lowerTy().characterBitsize(charTy) / 8); 1524 } 1525 } 1526 1527 // Process the array subspace arguments (shape, shift, etc.), if any, 1528 // translating everything to values in the descriptor wherever the entity 1529 // has a dynamic array dimension. 1530 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1531 mlir::Value extent = operands[shapeOffset]; 1532 mlir::Value outerExtent = extent; 1533 bool skipNext = false; 1534 if (hasSlice) { 1535 mlir::Value off = operands[sliceOffset]; 1536 mlir::Value adj = one; 1537 if (hasShift) 1538 adj = operands[shiftOffset]; 1539 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1540 if (constRows > 0) { 1541 gepArgs.push_back(ao); 1542 } else { 1543 auto dimOff = 1544 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1545 ptrOffset = 1546 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1547 } 1548 if (mlir::isa_and_nonnull<fir::UndefOp>( 1549 xbox.slice()[3 * di + 1].getDefiningOp())) { 1550 // This dimension contains a scalar expression in the array slice op. 1551 // The dimension is loop invariant, will be dropped, and will not 1552 // appear in the descriptor. 1553 skipNext = true; 1554 } 1555 } 1556 if (!skipNext) { 1557 // store extent 1558 if (hasSlice) 1559 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1560 operands[sliceOffset + 1], 1561 operands[sliceOffset + 2], zero, i64Ty); 1562 // Lower bound is normalized to 0 for BIND(C) interoperability. 1563 mlir::Value lb = zero; 1564 const bool isaPointerOrAllocatable = 1565 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1566 // Lower bound is defaults to 1 for POINTER, ALLOCATABLE, and 1567 // denormalized descriptors. 1568 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) 1569 lb = one; 1570 // If there is a shifted origin, and no fir.slice, and this is not 1571 // a normalized descriptor then use the value from the shift op as 1572 // the lower bound. 1573 if (hasShift && !(hasSlice || hasSubcomp || hasSubstr) && 1574 (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) { 1575 lb = operands[shiftOffset]; 1576 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1577 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1578 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1579 lb); 1580 } 1581 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1582 1583 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1584 1585 // store step (scaled by shaped extent) 1586 mlir::Value step = prevDimByteStride; 1587 if (hasSlice) 1588 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1589 operands[sliceOffset + 2]); 1590 dest = insertStride(rewriter, loc, dest, descIdx, step); 1591 ++descIdx; 1592 } 1593 1594 // compute the stride and offset for the next natural dimension 1595 prevDimByteStride = rewriter.create<mlir::LLVM::MulOp>( 1596 loc, i64Ty, prevDimByteStride, outerExtent); 1597 if (constRows == 0) 1598 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1599 outerExtent); 1600 else 1601 --constRows; 1602 1603 // increment iterators 1604 ++shapeOffset; 1605 if (hasShift) 1606 ++shiftOffset; 1607 if (hasSlice) 1608 sliceOffset += 3; 1609 } 1610 if (hasSlice || hasSubcomp || hasSubstr) { 1611 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1612 args.append(gepArgs.rbegin(), gepArgs.rend()); 1613 if (hasSubcomp) { 1614 // For each field in the path add the offset to base via the args list. 1615 // In the most general case, some offsets must be computed since 1616 // they are not be known until runtime. 1617 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1618 fir::unwrapPassByRefType(xbox.memref().getType())))) 1619 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1620 args.append(operands.begin() + xbox.subcomponentOffset(), 1621 operands.begin() + xbox.subcomponentOffset() + 1622 xbox.subcomponent().size()); 1623 } 1624 base = 1625 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1626 if (hasSubstr) 1627 base = shiftSubstringBase(rewriter, loc, base, 1628 operands[xbox.substrOffset()]); 1629 } 1630 dest = insertBaseAddress(rewriter, loc, dest, base); 1631 if (isDerivedTypeWithLenParams(boxTy)) 1632 TODO(loc, "fir.embox codegen of derived with length parameters"); 1633 1634 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1635 rewriter.replaceOp(xbox, result); 1636 return mlir::success(); 1637 } 1638 1639 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1640 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1641 /// zero origin lower bound for interoperability with BIND(C). 1642 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1643 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1644 } 1645 }; 1646 1647 /// Create a new box given a box reference. 1648 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1649 using EmboxCommonConversion::EmboxCommonConversion; 1650 1651 mlir::LogicalResult 1652 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1653 mlir::ConversionPatternRewriter &rewriter) const override { 1654 mlir::Location loc = rebox.getLoc(); 1655 mlir::Type idxTy = lowerTy().indexType(); 1656 mlir::Value loweredBox = adaptor.getOperands()[0]; 1657 mlir::ValueRange operands = adaptor.getOperands(); 1658 1659 // Create new descriptor and fill its non-shape related data. 1660 llvm::SmallVector<mlir::Value, 2> lenParams; 1661 mlir::Type inputEleTy = getInputEleTy(rebox); 1662 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1663 mlir::Value len = 1664 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1665 if (charTy.getFKind() != 1) { 1666 mlir::Value width = 1667 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1668 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1669 } 1670 lenParams.emplace_back(len); 1671 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1672 if (recTy.getNumLenParams() != 0) 1673 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1674 } 1675 auto [boxTy, dest, eleSize] = 1676 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1677 1678 // Read input extents, strides, and base address 1679 llvm::SmallVector<mlir::Value> inputExtents; 1680 llvm::SmallVector<mlir::Value> inputStrides; 1681 const unsigned inputRank = rebox.getRank(); 1682 for (unsigned i = 0; i < inputRank; ++i) { 1683 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1684 llvm::SmallVector<mlir::Value, 3> dimInfo = 1685 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1686 inputExtents.emplace_back(dimInfo[1]); 1687 inputStrides.emplace_back(dimInfo[2]); 1688 } 1689 1690 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1691 mlir::Value baseAddr = 1692 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1693 1694 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1695 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1696 operands, rewriter); 1697 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1698 operands, rewriter); 1699 } 1700 1701 private: 1702 /// Write resulting shape and base address in descriptor, and replace rebox 1703 /// op. 1704 mlir::LogicalResult 1705 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1706 mlir::ValueRange lbounds, mlir::ValueRange extents, 1707 mlir::ValueRange strides, 1708 mlir::ConversionPatternRewriter &rewriter) const { 1709 mlir::Location loc = rebox.getLoc(); 1710 mlir::Value zero = 1711 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1712 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1713 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1714 mlir::Value extent = std::get<0>(iter.value()); 1715 unsigned dim = iter.index(); 1716 mlir::Value lb = one; 1717 if (!lbounds.empty()) { 1718 lb = lbounds[dim]; 1719 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1720 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1721 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1722 }; 1723 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1724 dest = insertExtent(rewriter, loc, dest, dim, extent); 1725 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1726 } 1727 dest = insertBaseAddress(rewriter, loc, dest, base); 1728 mlir::Value result = 1729 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1730 rewriter.replaceOp(rebox, result); 1731 return mlir::success(); 1732 } 1733 1734 // Apply slice given the base address, extents and strides of the input box. 1735 mlir::LogicalResult 1736 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1737 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1738 mlir::ValueRange operands, 1739 mlir::ConversionPatternRewriter &rewriter) const { 1740 mlir::Location loc = rebox.getLoc(); 1741 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1742 mlir::Type idxTy = lowerTy().indexType(); 1743 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1744 // Apply subcomponent and substring shift on base address. 1745 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1746 // Cast to inputEleTy* so that a GEP can be used. 1747 mlir::Type inputEleTy = getInputEleTy(rebox); 1748 auto llvmElePtrTy = 1749 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1750 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1751 1752 if (!rebox.subcomponent().empty()) { 1753 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1754 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1755 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1756 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1757 } 1758 if (!rebox.substr().empty()) 1759 base = shiftSubstringBase(rewriter, loc, base, 1760 operands[rebox.substrOffset()]); 1761 } 1762 1763 if (rebox.slice().empty()) 1764 // The array section is of the form array[%component][substring], keep 1765 // the input array extents and strides. 1766 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1767 inputExtents, inputStrides, rewriter); 1768 1769 // Strides from the fir.box are in bytes. 1770 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1771 1772 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1773 // and strides. 1774 llvm::SmallVector<mlir::Value> slicedExtents; 1775 llvm::SmallVector<mlir::Value> slicedStrides; 1776 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1777 const bool sliceHasOrigins = !rebox.shift().empty(); 1778 unsigned sliceOps = rebox.sliceOffset(); 1779 unsigned shiftOps = rebox.shiftOffset(); 1780 auto strideOps = inputStrides.begin(); 1781 const unsigned inputRank = inputStrides.size(); 1782 for (unsigned i = 0; i < inputRank; 1783 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1784 mlir::Value sliceLb = 1785 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1786 mlir::Value inputStride = *strideOps; // already idxTy 1787 // Apply origin shift: base += (lb-shift)*input_stride 1788 mlir::Value sliceOrigin = 1789 sliceHasOrigins 1790 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1791 : one; 1792 mlir::Value diff = 1793 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1794 mlir::Value offset = 1795 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1796 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1797 // Apply upper bound and step if this is a triplet. Otherwise, the 1798 // dimension is dropped and no extents/strides are computed. 1799 mlir::Value upper = operands[sliceOps + 1]; 1800 const bool isTripletSlice = 1801 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1802 if (isTripletSlice) { 1803 mlir::Value step = 1804 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1805 // extent = ub-lb+step/step 1806 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1807 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1808 sliceUb, step, zero, idxTy); 1809 slicedExtents.emplace_back(extent); 1810 // stride = step*input_stride 1811 mlir::Value stride = 1812 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1813 slicedStrides.emplace_back(stride); 1814 } 1815 } 1816 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1817 slicedExtents, slicedStrides, rewriter); 1818 } 1819 1820 /// Apply a new shape to the data described by a box given the base address, 1821 /// extents and strides of the box. 1822 mlir::LogicalResult 1823 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1824 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1825 mlir::ValueRange operands, 1826 mlir::ConversionPatternRewriter &rewriter) const { 1827 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1828 operands.begin() + rebox.shiftOffset() + 1829 rebox.shift().size()}; 1830 if (rebox.shape().empty()) { 1831 // Only setting new lower bounds. 1832 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1833 inputStrides, rewriter); 1834 } 1835 1836 mlir::Location loc = rebox.getLoc(); 1837 // Strides from the fir.box are in bytes. 1838 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1839 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1840 1841 llvm::SmallVector<mlir::Value> newStrides; 1842 llvm::SmallVector<mlir::Value> newExtents; 1843 mlir::Type idxTy = lowerTy().indexType(); 1844 // First stride from input box is kept. The rest is assumed contiguous 1845 // (it is not possible to reshape otherwise). If the input is scalar, 1846 // which may be OK if all new extents are ones, the stride does not 1847 // matter, use one. 1848 mlir::Value stride = inputStrides.empty() 1849 ? genConstantIndex(loc, idxTy, rewriter, 1) 1850 : inputStrides[0]; 1851 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1852 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1853 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1854 newExtents.emplace_back(extent); 1855 newStrides.emplace_back(stride); 1856 // nextStride = extent * stride; 1857 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1858 } 1859 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1860 rewriter); 1861 } 1862 1863 /// Return scalar element type of the input box. 1864 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1865 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1866 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1867 return seqTy.getEleTy(); 1868 return ty; 1869 } 1870 }; 1871 1872 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1873 /// TODO: Part of supporting Fortran 2003 procedure pointers. 1874 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1875 using FIROpConversion::FIROpConversion; 1876 1877 mlir::LogicalResult 1878 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1879 mlir::ConversionPatternRewriter &rewriter) const override { 1880 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1881 return mlir::failure(); 1882 } 1883 }; 1884 1885 // Code shared between insert_value and extract_value Ops. 1886 struct ValueOpCommon { 1887 // Translate the arguments pertaining to any multidimensional array to 1888 // row-major order for LLVM-IR. 1889 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1890 mlir::Type ty) { 1891 assert(ty && "type is null"); 1892 const auto end = attrs.size(); 1893 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1894 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1895 const auto dim = getDimension(seq); 1896 if (dim > 1) { 1897 auto ub = std::min(i + dim, end); 1898 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1899 i += dim - 1; 1900 } 1901 ty = getArrayElementType(seq); 1902 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1903 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1904 } else { 1905 llvm_unreachable("index into invalid type"); 1906 } 1907 } 1908 } 1909 1910 static llvm::SmallVector<mlir::Attribute> 1911 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1912 mlir::ArrayAttr arrAttr) { 1913 llvm::SmallVector<mlir::Attribute> attrs; 1914 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1915 if (i->isa<mlir::IntegerAttr>()) { 1916 attrs.push_back(*i); 1917 } else { 1918 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1919 ++i; 1920 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1921 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1922 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1923 } 1924 } 1925 return attrs; 1926 } 1927 1928 private: 1929 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1930 unsigned result = 1; 1931 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1932 eleTy; 1933 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1934 ++result; 1935 return result; 1936 } 1937 1938 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1939 auto eleTy = ty.getElementType(); 1940 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1941 eleTy = arrTy.getElementType(); 1942 return eleTy; 1943 } 1944 }; 1945 1946 namespace { 1947 /// Extract a subobject value from an ssa-value of aggregate type 1948 struct ExtractValueOpConversion 1949 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1950 public ValueOpCommon { 1951 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1952 1953 mlir::LogicalResult 1954 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1955 mlir::ConversionPatternRewriter &rewriter) const override { 1956 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1957 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1958 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1959 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1960 extractVal, ty, adaptor.getOperands()[0], position); 1961 return mlir::success(); 1962 } 1963 }; 1964 1965 /// InsertValue is the generalized instruction for the composition of new 1966 /// aggregate type values. 1967 struct InsertValueOpConversion 1968 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1969 public ValueOpCommon { 1970 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1971 1972 mlir::LogicalResult 1973 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1974 mlir::ConversionPatternRewriter &rewriter) const override { 1975 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1976 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1977 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1978 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1979 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1980 position); 1981 return mlir::success(); 1982 } 1983 }; 1984 1985 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1986 struct InsertOnRangeOpConversion 1987 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1988 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1989 1990 // Increments an array of subscripts in a row major fasion. 1991 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1992 llvm::SmallVector<uint64_t> &subscripts) const { 1993 for (size_t i = dims.size(); i > 0; --i) { 1994 if (++subscripts[i - 1] < dims[i - 1]) { 1995 return; 1996 } 1997 subscripts[i - 1] = 0; 1998 } 1999 } 2000 2001 mlir::LogicalResult 2002 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 2003 mlir::ConversionPatternRewriter &rewriter) const override { 2004 2005 llvm::SmallVector<uint64_t> dims; 2006 auto type = adaptor.getOperands()[0].getType(); 2007 2008 // Iteratively extract the array dimensions from the type. 2009 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2010 dims.push_back(t.getNumElements()); 2011 type = t.getElementType(); 2012 } 2013 2014 llvm::SmallVector<std::uint64_t> lBounds; 2015 llvm::SmallVector<std::uint64_t> uBounds; 2016 2017 // Unzip the upper and lower bound and convert to a row major format. 2018 mlir::DenseIntElementsAttr coor = range.getCoor(); 2019 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2020 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2021 uBounds.push_back(*i++); 2022 lBounds.push_back(*i); 2023 } 2024 2025 auto &subscripts = lBounds; 2026 auto loc = range.getLoc(); 2027 mlir::Value lastOp = adaptor.getOperands()[0]; 2028 mlir::Value insertVal = adaptor.getOperands()[1]; 2029 2030 auto i64Ty = rewriter.getI64Type(); 2031 while (subscripts != uBounds) { 2032 // Convert uint64_t's to Attribute's. 2033 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2034 for (const auto &subscript : subscripts) 2035 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2036 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2037 loc, ty, lastOp, insertVal, 2038 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2039 2040 incrementSubscripts(dims, subscripts); 2041 } 2042 2043 // Convert uint64_t's to Attribute's. 2044 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2045 for (const auto &subscript : subscripts) 2046 subscriptAttrs.push_back( 2047 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2048 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2049 2050 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2051 range, ty, lastOp, insertVal, 2052 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2053 2054 return mlir::success(); 2055 } 2056 }; 2057 } // namespace 2058 2059 namespace { 2060 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2061 /// shifted etc. array. 2062 /// (See the static restriction on coordinate_of.) array_coor determines the 2063 /// coordinate (location) of a specific element. 2064 struct XArrayCoorOpConversion 2065 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2066 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2067 2068 mlir::LogicalResult 2069 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2070 mlir::ConversionPatternRewriter &rewriter) const override { 2071 auto loc = coor.getLoc(); 2072 mlir::ValueRange operands = adaptor.getOperands(); 2073 unsigned rank = coor.getRank(); 2074 assert(coor.indices().size() == rank); 2075 assert(coor.shape().empty() || coor.shape().size() == rank); 2076 assert(coor.shift().empty() || coor.shift().size() == rank); 2077 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2078 mlir::Type idxTy = lowerTy().indexType(); 2079 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2080 mlir::Value prevExt = one; 2081 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2082 mlir::Value offset = zero; 2083 const bool isShifted = !coor.shift().empty(); 2084 const bool isSliced = !coor.slice().empty(); 2085 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2086 2087 // For each dimension of the array, generate the offset calculation. 2088 for (unsigned i = 0; i < rank; ++i) { 2089 mlir::Value index = 2090 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2091 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2092 operands[coor.shiftOffset() + i]) 2093 : one; 2094 mlir::Value step = one; 2095 bool normalSlice = isSliced; 2096 // Compute zero based index in dimension i of the element, applying 2097 // potential triplets and lower bounds. 2098 if (isSliced) { 2099 mlir::Value ub = operands[coor.sliceOffset() + i + 1]; 2100 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2101 if (normalSlice) 2102 step = integerCast(loc, rewriter, idxTy, 2103 operands[coor.sliceOffset() + i + 2]); 2104 } 2105 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2106 mlir::Value diff = 2107 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2108 if (normalSlice) { 2109 mlir::Value sliceLb = 2110 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2111 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2112 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2113 } 2114 // Update the offset given the stride and the zero based index `diff` 2115 // that was just computed. 2116 if (baseIsBoxed) { 2117 // Use stride in bytes from the descriptor. 2118 mlir::Value stride = 2119 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2120 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2121 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2122 } else { 2123 // Use stride computed at last iteration. 2124 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2125 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2126 // Compute next stride assuming contiguity of the base array 2127 // (in element number). 2128 auto nextExt = 2129 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2130 prevExt = 2131 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2132 } 2133 } 2134 2135 // Add computed offset to the base address. 2136 if (baseIsBoxed) { 2137 // Working with byte offsets. The base address is read from the fir.box. 2138 // and need to be casted to i8* to do the pointer arithmetic. 2139 mlir::Type baseTy = 2140 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2141 mlir::Value base = 2142 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2143 mlir::Type voidPtrTy = getVoidPtrType(); 2144 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2145 llvm::SmallVector<mlir::Value> args{offset}; 2146 auto addr = 2147 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2148 if (coor.subcomponent().empty()) { 2149 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2150 return mlir::success(); 2151 } 2152 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2153 args.clear(); 2154 args.push_back(zero); 2155 if (!coor.lenParams().empty()) { 2156 // If type parameters are present, then we don't want to use a GEPOp 2157 // as below, as the LLVM struct type cannot be statically defined. 2158 TODO(loc, "derived type with type parameters"); 2159 } 2160 // TODO: array offset subcomponents must be converted to LLVM's 2161 // row-major layout here. 2162 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2163 args.push_back(operands[i]); 2164 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2165 args); 2166 return mlir::success(); 2167 } 2168 2169 // The array was not boxed, so it must be contiguous. offset is therefore an 2170 // element offset and the base type is kept in the GEP unless the element 2171 // type size is itself dynamic. 2172 mlir::Value base; 2173 if (coor.subcomponent().empty()) { 2174 // No subcomponent. 2175 if (!coor.lenParams().empty()) { 2176 // Type parameters. Adjust element size explicitly. 2177 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2178 assert(eleTy && "result must be a reference-like type"); 2179 if (fir::characterWithDynamicLen(eleTy)) { 2180 assert(coor.lenParams().size() == 1); 2181 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2182 eleTy.cast<fir::CharacterType>().getFKind()); 2183 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2184 auto scaledBySize = 2185 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2186 auto length = 2187 integerCast(loc, rewriter, idxTy, 2188 adaptor.getOperands()[coor.lenParamsOffset()]); 2189 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2190 length); 2191 } else { 2192 TODO(loc, "compute size of derived type with type parameters"); 2193 } 2194 } 2195 // Cast the base address to a pointer to T. 2196 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2197 adaptor.getOperands()[0]); 2198 } else { 2199 // Operand #0 must have a pointer type. For subcomponent slicing, we 2200 // want to cast away the array type and have a plain struct type. 2201 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2202 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2203 assert(ptrTy && "expected pointer type"); 2204 mlir::Type eleTy = ptrTy.getElementType(); 2205 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2206 eleTy = arrTy.getElementType(); 2207 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2208 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2209 adaptor.getOperands()[0]); 2210 } 2211 llvm::SmallVector<mlir::Value> args = {offset}; 2212 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2213 args.push_back(operands[i]); 2214 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2215 return mlir::success(); 2216 } 2217 }; 2218 } // namespace 2219 2220 /// Convert to (memory) reference to a reference to a subobject. 2221 /// The coordinate_of op is a Swiss army knife operation that can be used on 2222 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2223 /// With unboxed arrays, there is the restriction that the array have a static 2224 /// shape in all but the last column. 2225 struct CoordinateOpConversion 2226 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2227 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2228 2229 mlir::LogicalResult 2230 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2231 mlir::ConversionPatternRewriter &rewriter) const override { 2232 mlir::ValueRange operands = adaptor.getOperands(); 2233 2234 mlir::Location loc = coor.getLoc(); 2235 mlir::Value base = operands[0]; 2236 mlir::Type baseObjectTy = coor.getBaseType(); 2237 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2238 assert(objectTy && "fir.coordinate_of expects a reference type"); 2239 2240 // Complex type - basically, extract the real or imaginary part 2241 if (fir::isa_complex(objectTy)) { 2242 mlir::LLVM::ConstantOp c0 = 2243 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2244 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2245 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2246 rewriter.replaceOp(coor, gep); 2247 return mlir::success(); 2248 } 2249 2250 // Boxed type - get the base pointer from the box 2251 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2252 return doRewriteBox(coor, ty, operands, loc, rewriter); 2253 2254 // Reference, pointer or a heap type 2255 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2256 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2257 2258 return rewriter.notifyMatchFailure( 2259 coor, "fir.coordinate_of base operand has unsupported type"); 2260 } 2261 2262 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2263 return fir::hasDynamicSize(ty) 2264 ? op.getDefiningOp() 2265 ->getAttrOfType<mlir::IntegerAttr>("field") 2266 .getInt() 2267 : getIntValue(op); 2268 } 2269 2270 static int64_t getIntValue(mlir::Value val) { 2271 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2272 mlir::Operation *defop = val.getDefiningOp(); 2273 2274 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2275 return constOp.value(); 2276 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2277 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2278 return attr.getValue().getSExtValue(); 2279 fir::emitFatalError(val.getLoc(), "must be a constant"); 2280 } 2281 2282 static bool hasSubDimensions(mlir::Type type) { 2283 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2284 } 2285 2286 /// Check whether this form of `!fir.coordinate_of` is supported. These 2287 /// additional checks are required, because we are not yet able to convert 2288 /// all valid forms of `!fir.coordinate_of`. 2289 /// TODO: Either implement the unsupported cases or extend the verifier 2290 /// in FIROps.cpp instead. 2291 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2292 const std::size_t numOfCoors = coors.size(); 2293 std::size_t i = 0; 2294 bool subEle = false; 2295 bool ptrEle = false; 2296 for (; i < numOfCoors; ++i) { 2297 mlir::Value nxtOpnd = coors[i]; 2298 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2299 subEle = true; 2300 i += arrTy.getDimension() - 1; 2301 type = arrTy.getEleTy(); 2302 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2303 subEle = true; 2304 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2305 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2306 subEle = true; 2307 type = tupTy.getType(getIntValue(nxtOpnd)); 2308 } else { 2309 ptrEle = true; 2310 } 2311 } 2312 if (ptrEle) 2313 return (!subEle) && (numOfCoors == 1); 2314 return subEle && (i >= numOfCoors); 2315 } 2316 2317 /// Walk the abstract memory layout and determine if the path traverses any 2318 /// array types with unknown shape. Return true iff all the array types have a 2319 /// constant shape along the path. 2320 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2321 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2322 mlir::Value nxtOpnd = coors[i]; 2323 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2324 if (fir::sequenceWithNonConstantShape(arrTy)) 2325 return false; 2326 i += arrTy.getDimension() - 1; 2327 type = arrTy.getEleTy(); 2328 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2329 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2330 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2331 type = strTy.getType(getIntValue(nxtOpnd)); 2332 } else { 2333 return true; 2334 } 2335 } 2336 return true; 2337 } 2338 2339 private: 2340 mlir::LogicalResult 2341 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2342 mlir::Location loc, 2343 mlir::ConversionPatternRewriter &rewriter) const { 2344 mlir::Type boxObjTy = coor.getBaseType(); 2345 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2346 2347 mlir::Value boxBaseAddr = operands[0]; 2348 2349 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2350 // %box = ... : !fir.box<!fir.type<derived{len1:i32}>> 2351 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2352 // %addr = coordinate_of %box, %lenp 2353 if (coor.getNumOperands() == 2) { 2354 mlir::Operation *coordinateDef = 2355 (*coor.getCoor().begin()).getDefiningOp(); 2356 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2357 TODO(loc, 2358 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2359 } 2360 2361 // 2. GENERAL CASE: 2362 // 2.1. (`fir.array`) 2363 // %box = ... : !fix.box<!fir.array<?xU>> 2364 // %idx = ... : index 2365 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2366 // 2.2 (`fir.derived`) 2367 // %box = ... : !fix.box<!fir.type<derived_type{field_1:i32}>> 2368 // %idx = ... : i32 2369 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2370 // 2.3 (`fir.derived` inside `fir.array`) 2371 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2372 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2373 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2374 // 2.4. TODO: Either document or disable any other case that the following 2375 // implementation might convert. 2376 mlir::LLVM::ConstantOp c0 = 2377 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2378 mlir::Value resultAddr = 2379 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2380 boxBaseAddr, rewriter); 2381 // Component Type 2382 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2383 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2384 2385 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2386 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2387 if (i != 1) 2388 TODO(loc, "fir.array nested inside other array and/or derived type"); 2389 // Applies byte strides from the box. Ignore lower bound from box 2390 // since fir.coordinate_of indexes are zero based. Lowering takes care 2391 // of lower bound aspects. This both accounts for dynamically sized 2392 // types and non contiguous arrays. 2393 auto idxTy = lowerTy().indexType(); 2394 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2395 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2396 index < lastIndex; ++index) { 2397 mlir::Value stride = 2398 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2399 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2400 operands[index], stride); 2401 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2402 } 2403 auto voidPtrBase = 2404 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2405 llvm::SmallVector<mlir::Value> args = {off}; 2406 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2407 voidPtrBase, args); 2408 i += arrTy.getDimension() - 1; 2409 cpnTy = arrTy.getEleTy(); 2410 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2411 auto recRefTy = 2412 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2413 mlir::Value nxtOpnd = operands[i]; 2414 auto memObj = 2415 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2416 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2417 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2418 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2419 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2420 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2421 args); 2422 resultAddr = 2423 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2424 } else { 2425 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2426 } 2427 } 2428 2429 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2430 return mlir::success(); 2431 } 2432 2433 mlir::LogicalResult 2434 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2435 mlir::ValueRange operands, mlir::Location loc, 2436 mlir::ConversionPatternRewriter &rewriter) const { 2437 mlir::Type baseObjectTy = coor.getBaseType(); 2438 2439 // Component Type 2440 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2441 bool hasSubdimension = hasSubDimensions(cpnTy); 2442 bool columnIsDeferred = !hasSubdimension; 2443 2444 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2445 TODO(loc, "unsupported combination of coordinate operands"); 2446 2447 const bool hasKnownShape = 2448 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2449 2450 // If only the column is `?`, then we can simply place the column value in 2451 // the 0-th GEP position. 2452 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2453 if (!hasKnownShape) { 2454 const unsigned sz = arrTy.getDimension(); 2455 if (arraysHaveKnownShape(arrTy.getEleTy(), 2456 operands.drop_front(1 + sz))) { 2457 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2458 bool allConst = true; 2459 for (unsigned i = 0; i < sz - 1; ++i) { 2460 if (shape[i] < 0) { 2461 allConst = false; 2462 break; 2463 } 2464 } 2465 if (allConst) 2466 columnIsDeferred = true; 2467 } 2468 } 2469 } 2470 2471 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2472 return mlir::emitError( 2473 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2474 2475 if (hasKnownShape || columnIsDeferred) { 2476 llvm::SmallVector<mlir::Value> offs; 2477 if (hasKnownShape && hasSubdimension) { 2478 mlir::LLVM::ConstantOp c0 = 2479 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2480 offs.push_back(c0); 2481 } 2482 llvm::Optional<int> dims; 2483 llvm::SmallVector<mlir::Value> arrIdx; 2484 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2485 mlir::Value nxtOpnd = operands[i]; 2486 2487 if (!cpnTy) 2488 return mlir::emitError(loc, "invalid coordinate/check failed"); 2489 2490 // check if the i-th coordinate relates to an array 2491 if (dims.hasValue()) { 2492 arrIdx.push_back(nxtOpnd); 2493 int dimsLeft = *dims; 2494 if (dimsLeft > 1) { 2495 dims = dimsLeft - 1; 2496 continue; 2497 } 2498 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2499 // append array range in reverse (FIR arrays are column-major) 2500 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2501 arrIdx.clear(); 2502 dims.reset(); 2503 continue; 2504 } 2505 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2506 int d = arrTy.getDimension() - 1; 2507 if (d > 0) { 2508 dims = d; 2509 arrIdx.push_back(nxtOpnd); 2510 continue; 2511 } 2512 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2513 offs.push_back(nxtOpnd); 2514 continue; 2515 } 2516 2517 // check if the i-th coordinate relates to a field 2518 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2519 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2520 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2521 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2522 else 2523 cpnTy = nullptr; 2524 2525 offs.push_back(nxtOpnd); 2526 } 2527 if (dims.hasValue()) 2528 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2529 mlir::Value base = operands[0]; 2530 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2531 rewriter.replaceOp(coor, retval); 2532 return mlir::success(); 2533 } 2534 2535 return mlir::emitError( 2536 loc, "fir.coordinate_of base operand has unsupported type"); 2537 } 2538 }; 2539 2540 /// Convert `fir.field_index`. The conversion depends on whether the size of 2541 /// the record is static or dynamic. 2542 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2543 using FIROpConversion::FIROpConversion; 2544 2545 // NB: most field references should be resolved by this point 2546 mlir::LogicalResult 2547 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2548 mlir::ConversionPatternRewriter &rewriter) const override { 2549 auto recTy = field.getOnType().cast<fir::RecordType>(); 2550 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2551 2552 if (!fir::hasDynamicSize(recTy)) { 2553 // Derived type has compile-time constant layout. Return index of the 2554 // component type in the parent type (to be used in GEP). 2555 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset( 2556 field.getLoc(), rewriter, index)}); 2557 return mlir::success(); 2558 } 2559 2560 // Derived type has compile-time constant layout. Call the compiler 2561 // generated function to determine the byte offset of the field at runtime. 2562 // This returns a non-constant. 2563 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get( 2564 field.getContext(), getOffsetMethodName(recTy, field.getFieldId())); 2565 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr); 2566 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr( 2567 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index)); 2568 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 2569 field, lowerTy().offsetType(), adaptor.getOperands(), 2570 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr}); 2571 return mlir::success(); 2572 } 2573 2574 // Re-Construct the name of the compiler generated method that calculates the 2575 // offset 2576 inline static std::string getOffsetMethodName(fir::RecordType recTy, 2577 llvm::StringRef field) { 2578 return recTy.getName().str() + "P." + field.str() + ".offset"; 2579 } 2580 }; 2581 2582 /// Convert `fir.end` 2583 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 2584 using FIROpConversion::FIROpConversion; 2585 2586 mlir::LogicalResult 2587 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 2588 mlir::ConversionPatternRewriter &rewriter) const override { 2589 TODO(firEnd.getLoc(), "fir.end codegen"); 2590 return mlir::failure(); 2591 } 2592 }; 2593 2594 /// Lower `fir.gentypedesc` to a global constant. 2595 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2596 using FIROpConversion::FIROpConversion; 2597 2598 mlir::LogicalResult 2599 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2600 mlir::ConversionPatternRewriter &rewriter) const override { 2601 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2602 return mlir::failure(); 2603 } 2604 }; 2605 2606 /// Lower `fir.has_value` operation to `llvm.return` operation. 2607 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2608 using FIROpConversion::FIROpConversion; 2609 2610 mlir::LogicalResult 2611 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2612 mlir::ConversionPatternRewriter &rewriter) const override { 2613 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2614 adaptor.getOperands()); 2615 return mlir::success(); 2616 } 2617 }; 2618 2619 /// Lower `fir.global` operation to `llvm.global` operation. 2620 /// `fir.insert_on_range` operations are replaced with constant dense attribute 2621 /// if they are applied on the full range. 2622 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2623 using FIROpConversion::FIROpConversion; 2624 2625 mlir::LogicalResult 2626 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2627 mlir::ConversionPatternRewriter &rewriter) const override { 2628 auto tyAttr = convertType(global.getType()); 2629 if (global.getType().isa<fir::BoxType>()) 2630 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2631 auto loc = global.getLoc(); 2632 mlir::Attribute initAttr; 2633 if (global.getInitVal()) 2634 initAttr = global.getInitVal().getValue(); 2635 auto linkage = convertLinkage(global.getLinkName()); 2636 auto isConst = global.getConstant().hasValue(); 2637 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2638 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2639 auto &gr = g.getInitializerRegion(); 2640 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2641 if (!gr.empty()) { 2642 // Replace insert_on_range with a constant dense attribute if the 2643 // initialization is on the full range. 2644 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2645 for (auto insertOp : insertOnRangeOps) { 2646 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2647 auto seqTyAttr = convertType(insertOp.getType()); 2648 auto *op = insertOp.getVal().getDefiningOp(); 2649 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2650 if (!constant) { 2651 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2652 if (!convertOp) 2653 continue; 2654 constant = mlir::cast<mlir::arith::ConstantOp>( 2655 convertOp.getValue().getDefiningOp()); 2656 } 2657 mlir::Type vecType = mlir::VectorType::get( 2658 insertOp.getType().getShape(), constant.getType()); 2659 auto denseAttr = mlir::DenseElementsAttr::get( 2660 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2661 rewriter.setInsertionPointAfter(insertOp); 2662 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2663 insertOp, seqTyAttr, denseAttr); 2664 } 2665 } 2666 } 2667 rewriter.eraseOp(global); 2668 return mlir::success(); 2669 } 2670 2671 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2672 fir::SequenceType seqTy) const { 2673 auto extents = seqTy.getShape(); 2674 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2675 return false; 2676 auto cur_index = indexes.value_begin<int64_t>(); 2677 for (unsigned i = 0; i < indexes.size(); i += 2) { 2678 if (*(cur_index++) != 0) 2679 return false; 2680 if (*(cur_index++) != extents[i / 2] - 1) 2681 return false; 2682 } 2683 return true; 2684 } 2685 2686 // TODO: String comparaison should be avoided. Replace linkName with an 2687 // enumeration. 2688 mlir::LLVM::Linkage 2689 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2690 if (optLinkage.hasValue()) { 2691 auto name = optLinkage.getValue(); 2692 if (name == "internal") 2693 return mlir::LLVM::Linkage::Internal; 2694 if (name == "linkonce") 2695 return mlir::LLVM::Linkage::Linkonce; 2696 if (name == "linkonce_odr") 2697 return mlir::LLVM::Linkage::LinkonceODR; 2698 if (name == "common") 2699 return mlir::LLVM::Linkage::Common; 2700 if (name == "weak") 2701 return mlir::LLVM::Linkage::Weak; 2702 } 2703 return mlir::LLVM::Linkage::External; 2704 } 2705 }; 2706 2707 /// `fir.load` --> `llvm.load` 2708 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2709 using FIROpConversion::FIROpConversion; 2710 2711 mlir::LogicalResult 2712 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2713 mlir::ConversionPatternRewriter &rewriter) const override { 2714 // fir.box is a special case because it is considered as an ssa values in 2715 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2716 // and fir.box end up being the same llvm types and loading a 2717 // fir.ref<fir.box> is actually a no op in LLVM. 2718 if (load.getType().isa<fir::BoxType>()) { 2719 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2720 } else { 2721 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2722 load, convertType(load.getType()), adaptor.getOperands(), 2723 load->getAttrs()); 2724 } 2725 return mlir::success(); 2726 } 2727 }; 2728 2729 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2730 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2731 /// math flags? 2732 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2733 using FIROpConversion::FIROpConversion; 2734 2735 mlir::LogicalResult 2736 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2737 mlir::ConversionPatternRewriter &rewriter) const override { 2738 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2739 return mlir::success(); 2740 } 2741 }; 2742 2743 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2744 llvm::Optional<mlir::ValueRange> destOps, 2745 mlir::ConversionPatternRewriter &rewriter, 2746 mlir::Block *newBlock) { 2747 if (destOps.hasValue()) 2748 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2749 newBlock, mlir::ValueRange()); 2750 else 2751 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2752 } 2753 2754 template <typename A, typename B> 2755 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2756 mlir::ConversionPatternRewriter &rewriter) { 2757 if (destOps.hasValue()) 2758 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2759 dest); 2760 else 2761 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2762 } 2763 2764 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2765 mlir::Block *dest, 2766 llvm::Optional<mlir::ValueRange> destOps, 2767 mlir::ConversionPatternRewriter &rewriter) { 2768 auto *thisBlock = rewriter.getInsertionBlock(); 2769 auto *newBlock = createBlock(rewriter, dest); 2770 rewriter.setInsertionPointToEnd(thisBlock); 2771 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2772 rewriter.setInsertionPointToEnd(newBlock); 2773 } 2774 2775 /// Conversion of `fir.select_case` 2776 /// 2777 /// The `fir.select_case` operation is converted to a if-then-else ladder. 2778 /// Depending on the case condition type, one or several comparison and 2779 /// conditional branching can be generated. 2780 /// 2781 /// A a point value case such as `case(4)`, a lower bound case such as 2782 /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a 2783 /// simple comparison between the selector value and the constant value in the 2784 /// case. The block associated with the case condition is then executed if 2785 /// the comparison succeed otherwise it branch to the next block with the 2786 /// comparison for the the next case conditon. 2787 /// 2788 /// A closed interval case condition such as `case(7:10)` is converted with a 2789 /// first comparison and conditional branching for the lower bound. If 2790 /// successful, it branch to a second block with the comparison for the 2791 /// upper bound in the same case condition. 2792 /// 2793 /// TODO: lowering of CHARACTER type cases is not handled yet. 2794 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 2795 using FIROpConversion::FIROpConversion; 2796 2797 mlir::LogicalResult 2798 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 2799 mlir::ConversionPatternRewriter &rewriter) const override { 2800 unsigned conds = caseOp.getNumConditions(); 2801 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 2802 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 2803 auto ty = caseOp.getSelector().getType(); 2804 if (ty.isa<fir::CharacterType>()) { 2805 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 2806 return mlir::failure(); 2807 } 2808 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 2809 auto loc = caseOp.getLoc(); 2810 for (unsigned t = 0; t != conds; ++t) { 2811 mlir::Block *dest = caseOp.getSuccessor(t); 2812 llvm::Optional<mlir::ValueRange> destOps = 2813 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 2814 llvm::Optional<mlir::ValueRange> cmpOps = 2815 *caseOp.getCompareOperands(adaptor.getOperands(), t); 2816 mlir::Value caseArg = *(cmpOps.getValue().begin()); 2817 mlir::Attribute attr = cases[t]; 2818 if (attr.isa<fir::PointIntervalAttr>()) { 2819 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2820 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 2821 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2822 continue; 2823 } 2824 if (attr.isa<fir::LowerBoundAttr>()) { 2825 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2826 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2827 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2828 continue; 2829 } 2830 if (attr.isa<fir::UpperBoundAttr>()) { 2831 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2832 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 2833 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2834 continue; 2835 } 2836 if (attr.isa<fir::ClosedIntervalAttr>()) { 2837 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2838 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2839 auto *thisBlock = rewriter.getInsertionBlock(); 2840 auto *newBlock1 = createBlock(rewriter, dest); 2841 auto *newBlock2 = createBlock(rewriter, dest); 2842 rewriter.setInsertionPointToEnd(thisBlock); 2843 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 2844 rewriter.setInsertionPointToEnd(newBlock1); 2845 mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1); 2846 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 2847 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 2848 genCondBrOp(loc, cmp0, dest, destOps, rewriter, newBlock2); 2849 rewriter.setInsertionPointToEnd(newBlock2); 2850 continue; 2851 } 2852 assert(attr.isa<mlir::UnitAttr>()); 2853 assert((t + 1 == conds) && "unit must be last"); 2854 genBrOp(caseOp, dest, destOps, rewriter); 2855 } 2856 return mlir::success(); 2857 } 2858 }; 2859 2860 template <typename OP> 2861 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 2862 typename OP::Adaptor adaptor, 2863 mlir::ConversionPatternRewriter &rewriter) { 2864 unsigned conds = select.getNumConditions(); 2865 auto cases = select.getCases().getValue(); 2866 mlir::Value selector = adaptor.getSelector(); 2867 auto loc = select.getLoc(); 2868 assert(conds > 0 && "select must have cases"); 2869 2870 llvm::SmallVector<mlir::Block *> destinations; 2871 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 2872 mlir::Block *defaultDestination; 2873 mlir::ValueRange defaultOperands; 2874 llvm::SmallVector<int32_t> caseValues; 2875 2876 for (unsigned t = 0; t != conds; ++t) { 2877 mlir::Block *dest = select.getSuccessor(t); 2878 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 2879 const mlir::Attribute &attr = cases[t]; 2880 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 2881 destinations.push_back(dest); 2882 destinationsOperands.push_back(destOps.hasValue() ? *destOps 2883 : mlir::ValueRange{}); 2884 caseValues.push_back(intAttr.getInt()); 2885 continue; 2886 } 2887 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 2888 assert((t + 1 == conds) && "unit must be last"); 2889 defaultDestination = dest; 2890 defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{}; 2891 } 2892 2893 // LLVM::SwitchOp takes a i32 type for the selector. 2894 if (select.getSelector().getType() != rewriter.getI32Type()) 2895 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(), 2896 selector); 2897 2898 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 2899 select, selector, 2900 /*defaultDestination=*/defaultDestination, 2901 /*defaultOperands=*/defaultOperands, 2902 /*caseValues=*/caseValues, 2903 /*caseDestinations=*/destinations, 2904 /*caseOperands=*/destinationsOperands, 2905 /*branchWeights=*/llvm::ArrayRef<std::int32_t>()); 2906 } 2907 2908 /// conversion of fir::SelectOp to an if-then-else ladder 2909 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 2910 using FIROpConversion::FIROpConversion; 2911 2912 mlir::LogicalResult 2913 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 2914 mlir::ConversionPatternRewriter &rewriter) const override { 2915 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 2916 return mlir::success(); 2917 } 2918 }; 2919 2920 /// conversion of fir::SelectRankOp to an if-then-else ladder 2921 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 2922 using FIROpConversion::FIROpConversion; 2923 2924 mlir::LogicalResult 2925 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 2926 mlir::ConversionPatternRewriter &rewriter) const override { 2927 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 2928 return mlir::success(); 2929 } 2930 }; 2931 2932 /// Lower `fir.select_type` to LLVM IR dialect. 2933 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2934 using FIROpConversion::FIROpConversion; 2935 2936 mlir::LogicalResult 2937 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2938 mlir::ConversionPatternRewriter &rewriter) const override { 2939 mlir::emitError(select.getLoc(), 2940 "fir.select_type should have already been converted"); 2941 return mlir::failure(); 2942 } 2943 }; 2944 2945 /// `fir.store` --> `llvm.store` 2946 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2947 using FIROpConversion::FIROpConversion; 2948 2949 mlir::LogicalResult 2950 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2951 mlir::ConversionPatternRewriter &rewriter) const override { 2952 if (store.getValue().getType().isa<fir::BoxType>()) { 2953 // fir.box value is actually in memory, load it first before storing it. 2954 mlir::Location loc = store.getLoc(); 2955 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2956 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2957 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2958 adaptor.getOperands()[0]); 2959 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2960 store, val, adaptor.getOperands()[1]); 2961 } else { 2962 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2963 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2964 } 2965 return mlir::success(); 2966 } 2967 }; 2968 2969 namespace { 2970 2971 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2972 /// the character buffer and one for the buffer length. 2973 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2974 using FIROpConversion::FIROpConversion; 2975 2976 mlir::LogicalResult 2977 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2978 mlir::ConversionPatternRewriter &rewriter) const override { 2979 auto *ctx = unboxchar.getContext(); 2980 2981 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2982 mlir::Value tuple = adaptor.getOperands()[0]; 2983 mlir::Type tupleTy = tuple.getType(); 2984 2985 mlir::Location loc = unboxchar.getLoc(); 2986 mlir::Value ptrToBuffer = 2987 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2988 2989 mlir::LLVM::ExtractValueOp len = 2990 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2991 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2992 2993 rewriter.replaceOp(unboxchar, 2994 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2995 return mlir::success(); 2996 } 2997 }; 2998 2999 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 3000 /// components. 3001 /// TODO: Part of supporting Fortran 2003 procedure pointers. 3002 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 3003 using FIROpConversion::FIROpConversion; 3004 3005 mlir::LogicalResult 3006 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 3007 mlir::ConversionPatternRewriter &rewriter) const override { 3008 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 3009 return mlir::failure(); 3010 } 3011 }; 3012 3013 /// convert to LLVM IR dialect `undef` 3014 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3015 using FIROpConversion::FIROpConversion; 3016 3017 mlir::LogicalResult 3018 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3019 mlir::ConversionPatternRewriter &rewriter) const override { 3020 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3021 undef, convertType(undef.getType())); 3022 return mlir::success(); 3023 } 3024 }; 3025 3026 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3027 using FIROpConversion::FIROpConversion; 3028 3029 mlir::LogicalResult 3030 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3031 mlir::ConversionPatternRewriter &rewriter) const override { 3032 mlir::Type ty = convertType(zero.getType()); 3033 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3034 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3035 } else if (ty.isa<mlir::IntegerType>()) { 3036 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3037 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3038 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3039 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3040 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3041 } else { 3042 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3043 return rewriter.notifyMatchFailure( 3044 zero, 3045 "conversion of fir.zero with aggregate type not implemented yet"); 3046 } 3047 return mlir::success(); 3048 } 3049 }; 3050 3051 /// `fir.unreachable` --> `llvm.unreachable` 3052 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3053 using FIROpConversion::FIROpConversion; 3054 3055 mlir::LogicalResult 3056 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3057 mlir::ConversionPatternRewriter &rewriter) const override { 3058 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3059 return mlir::success(); 3060 } 3061 }; 3062 3063 /// `fir.is_present` --> 3064 /// ``` 3065 /// %0 = llvm.mlir.constant(0 : i64) 3066 /// %1 = llvm.ptrtoint %0 3067 /// %2 = llvm.icmp "ne" %1, %0 : i64 3068 /// ``` 3069 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3070 using FIROpConversion::FIROpConversion; 3071 3072 mlir::LogicalResult 3073 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3074 mlir::ConversionPatternRewriter &rewriter) const override { 3075 mlir::Type idxTy = lowerTy().indexType(); 3076 mlir::Location loc = isPresent.getLoc(); 3077 auto ptr = adaptor.getOperands()[0]; 3078 3079 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3080 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3081 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3082 3083 mlir::Type ty = structTy.getBody()[0]; 3084 mlir::MLIRContext *ctx = isPresent.getContext(); 3085 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3086 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3087 } 3088 mlir::LLVM::ConstantOp c0 = 3089 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3090 auto addr = rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3091 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3092 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3093 3094 return mlir::success(); 3095 } 3096 }; 3097 3098 /// Create value signaling an absent optional argument in a call, e.g. 3099 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3100 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3101 using FIROpConversion::FIROpConversion; 3102 3103 mlir::LogicalResult 3104 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3105 mlir::ConversionPatternRewriter &rewriter) const override { 3106 mlir::Type ty = convertType(absent.getType()); 3107 mlir::Location loc = absent.getLoc(); 3108 3109 if (absent.getType().isa<fir::BoxCharType>()) { 3110 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3111 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3112 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3113 auto nullField = 3114 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3115 mlir::MLIRContext *ctx = absent.getContext(); 3116 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3117 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3118 absent, ty, undefStruct, nullField, c0); 3119 } else { 3120 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3121 } 3122 return mlir::success(); 3123 } 3124 }; 3125 3126 // 3127 // Primitive operations on Complex types 3128 // 3129 3130 /// Generate inline code for complex addition/subtraction 3131 template <typename LLVMOP, typename OPTY> 3132 static mlir::LLVM::InsertValueOp 3133 complexSum(OPTY sumop, mlir::ValueRange opnds, 3134 mlir::ConversionPatternRewriter &rewriter, 3135 fir::LLVMTypeConverter &lowering) { 3136 mlir::Value a = opnds[0]; 3137 mlir::Value b = opnds[1]; 3138 auto loc = sumop.getLoc(); 3139 auto ctx = sumop.getContext(); 3140 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3141 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3142 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3143 mlir::Type ty = lowering.convertType(sumop.getType()); 3144 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3145 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3146 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3147 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3148 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3149 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3150 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3151 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3152 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3153 } 3154 } // namespace 3155 3156 namespace { 3157 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3158 using FIROpConversion::FIROpConversion; 3159 3160 mlir::LogicalResult 3161 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3162 mlir::ConversionPatternRewriter &rewriter) const override { 3163 // given: (x + iy) + (x' + iy') 3164 // result: (x + x') + i(y + y') 3165 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3166 rewriter, lowerTy()); 3167 rewriter.replaceOp(addc, r.getResult()); 3168 return mlir::success(); 3169 } 3170 }; 3171 3172 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3173 using FIROpConversion::FIROpConversion; 3174 3175 mlir::LogicalResult 3176 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3177 mlir::ConversionPatternRewriter &rewriter) const override { 3178 // given: (x + iy) - (x' + iy') 3179 // result: (x - x') + i(y - y') 3180 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3181 rewriter, lowerTy()); 3182 rewriter.replaceOp(subc, r.getResult()); 3183 return mlir::success(); 3184 } 3185 }; 3186 3187 /// Inlined complex multiply 3188 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3189 using FIROpConversion::FIROpConversion; 3190 3191 mlir::LogicalResult 3192 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3193 mlir::ConversionPatternRewriter &rewriter) const override { 3194 // TODO: Can we use a call to __muldc3 ? 3195 // given: (x + iy) * (x' + iy') 3196 // result: (xx'-yy')+i(xy'+yx') 3197 mlir::Value a = adaptor.getOperands()[0]; 3198 mlir::Value b = adaptor.getOperands()[1]; 3199 auto loc = mulc.getLoc(); 3200 auto *ctx = mulc.getContext(); 3201 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3202 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3203 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3204 mlir::Type ty = convertType(mulc.getType()); 3205 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3206 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3207 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3208 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3209 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3210 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3211 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3212 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3213 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3214 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3215 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3216 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3217 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3218 rewriter.replaceOp(mulc, r0.getResult()); 3219 return mlir::success(); 3220 } 3221 }; 3222 3223 /// Inlined complex division 3224 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3225 using FIROpConversion::FIROpConversion; 3226 3227 mlir::LogicalResult 3228 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3229 mlir::ConversionPatternRewriter &rewriter) const override { 3230 // TODO: Can we use a call to __divdc3 instead? 3231 // Just generate inline code for now. 3232 // given: (x + iy) / (x' + iy') 3233 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 3234 mlir::Value a = adaptor.getOperands()[0]; 3235 mlir::Value b = adaptor.getOperands()[1]; 3236 auto loc = divc.getLoc(); 3237 auto *ctx = divc.getContext(); 3238 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3239 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3240 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 3241 mlir::Type ty = convertType(divc.getType()); 3242 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3243 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3244 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3245 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3246 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3247 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 3248 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3249 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3250 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3251 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 3252 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 3253 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 3254 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 3255 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 3256 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 3257 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3258 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3259 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3260 rewriter.replaceOp(divc, r0.getResult()); 3261 return mlir::success(); 3262 } 3263 }; 3264 3265 /// Inlined complex negation 3266 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 3267 using FIROpConversion::FIROpConversion; 3268 3269 mlir::LogicalResult 3270 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 3271 mlir::ConversionPatternRewriter &rewriter) const override { 3272 // given: -(x + iy) 3273 // result: -x - iy 3274 auto *ctxt = neg.getContext(); 3275 auto eleTy = convertType(getComplexEleTy(neg.getType())); 3276 auto ty = convertType(neg.getType()); 3277 auto loc = neg.getLoc(); 3278 mlir::Value o0 = adaptor.getOperands()[0]; 3279 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 3280 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 3281 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 3282 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 3283 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 3284 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 3285 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 3286 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 3287 return mlir::success(); 3288 } 3289 }; 3290 3291 /// Conversion pattern for operation that must be dead. The information in these 3292 /// operations is used by other operation. At this point they should not have 3293 /// anymore uses. 3294 /// These operations are normally dead after the pre-codegen pass. 3295 template <typename FromOp> 3296 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3297 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3298 const fir::FIRToLLVMPassOptions &options) 3299 : FIROpConversion<FromOp>(lowering, options) {} 3300 using OpAdaptor = typename FromOp::Adaptor; 3301 3302 mlir::LogicalResult 3303 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3304 mlir::ConversionPatternRewriter &rewriter) const final { 3305 if (!op->getUses().empty()) 3306 return rewriter.notifyMatchFailure(op, "op must be dead"); 3307 rewriter.eraseOp(op); 3308 return mlir::success(); 3309 } 3310 }; 3311 3312 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3313 using MustBeDeadConversion::MustBeDeadConversion; 3314 }; 3315 3316 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3317 using MustBeDeadConversion::MustBeDeadConversion; 3318 }; 3319 3320 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3321 using MustBeDeadConversion::MustBeDeadConversion; 3322 }; 3323 3324 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3325 using MustBeDeadConversion::MustBeDeadConversion; 3326 }; 3327 3328 } // namespace 3329 3330 namespace { 3331 /// Convert FIR dialect to LLVM dialect 3332 /// 3333 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3334 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3335 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3336 public: 3337 FIRToLLVMLowering() = default; 3338 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3339 mlir::ModuleOp getModule() { return getOperation(); } 3340 3341 void runOnOperation() override final { 3342 auto mod = getModule(); 3343 if (!forcedTargetTriple.empty()) 3344 fir::setTargetTriple(mod, forcedTargetTriple); 3345 3346 auto *context = getModule().getContext(); 3347 fir::LLVMTypeConverter typeConverter{getModule()}; 3348 mlir::RewritePatternSet pattern(context); 3349 pattern.insert< 3350 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3351 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3352 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3353 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3354 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3355 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3356 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3357 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3358 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3359 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3360 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3361 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3362 InsertValueOpConversion, IsPresentOpConversion, 3363 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3364 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3365 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3366 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3367 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3368 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3369 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3370 XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(typeConverter, 3371 options); 3372 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern); 3373 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern); 3374 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3375 pattern); 3376 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3377 pattern); 3378 mlir::ConversionTarget target{*context}; 3379 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3380 // The OpenMP dialect is legal for Operations without regions, for those 3381 // which contains regions it is legal if the region contains only the 3382 // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and 3383 // legalize conversion of OpenMP operations without regions. 3384 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter); 3385 target.addLegalDialect<mlir::omp::OpenMPDialect>(); 3386 3387 // required NOPs for applying a full conversion 3388 target.addLegalOp<mlir::ModuleOp>(); 3389 3390 // apply the patterns 3391 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3392 std::move(pattern)))) { 3393 signalPassFailure(); 3394 } 3395 } 3396 3397 private: 3398 fir::FIRToLLVMPassOptions options; 3399 }; 3400 3401 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3402 struct LLVMIRLoweringPass 3403 : public mlir::PassWrapper<LLVMIRLoweringPass, 3404 mlir::OperationPass<mlir::ModuleOp>> { 3405 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass) 3406 3407 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p) 3408 : output{output}, printer{p} {} 3409 3410 mlir::ModuleOp getModule() { return getOperation(); } 3411 3412 void runOnOperation() override final { 3413 auto *ctx = getModule().getContext(); 3414 auto optName = getModule().getName(); 3415 llvm::LLVMContext llvmCtx; 3416 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3417 getModule(), llvmCtx, optName ? *optName : "FIRModule")) { 3418 printer(*llvmModule, output); 3419 return; 3420 } 3421 3422 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3423 signalPassFailure(); 3424 } 3425 3426 private: 3427 llvm::raw_ostream &output; 3428 fir::LLVMIRLoweringPrinter printer; 3429 }; 3430 3431 } // namespace 3432 3433 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3434 return std::make_unique<FIRToLLVMLowering>(); 3435 } 3436 3437 std::unique_ptr<mlir::Pass> 3438 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) { 3439 return std::make_unique<FIRToLLVMLowering>(options); 3440 } 3441 3442 std::unique_ptr<mlir::Pass> 3443 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output, 3444 fir::LLVMIRLoweringPrinter printer) { 3445 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3446 } 3447