//===-- CodeGen.cpp -- bridge to lower to LLVM ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
//
//===----------------------------------------------------------------------===//

#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/InternalNames.h"
#include "flang/Optimizer/Support/TypeCode.h"
#include "flang/Semantics/runtime-type-info.h"
#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/OpenMPToLLVM/ConvertOpenMPToLLVM.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/ArrayRef.h"

#define DEBUG_TYPE "flang-codegen"

// fir::LLVMTypeConverter for converting to LLVM IR dialect types.
#include "TypeConverter.h"

// TODO: This should really be recovered from the specified target.
static constexpr unsigned defaultAlign = 8;

/// `fir.box` attribute values as defined for CFI_attribute_t in
/// flang/ISO_Fortran_binding.h.
static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;

static inline mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
  return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
}

static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
                 mlir::ConversionPatternRewriter &rewriter,
                 std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
}

static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                mlir::Block *insertBefore) {
  assert(insertBefore && "expected valid insertion block");
  return rewriter.createBlock(insertBefore->getParent(),
                              mlir::Region::iterator(insertBefore));
}

namespace {
/// FIR conversion pattern template
template <typename FromOp>
class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
public:
  explicit FIROpConversion(fir::LLVMTypeConverter &lowering,
                           const fir::FIRToLLVMPassOptions &options)
      : mlir::ConvertOpToLLVMPattern<FromOp>(lowering), options(options) {}

protected:
  mlir::Type convertType(mlir::Type ty) const {
    return lowerTy().convertType(ty);
  }
  mlir::Type voidPtrTy() const { return getVoidPtrType(); }

  mlir::Type getVoidPtrType() const {
    return mlir::LLVM::LLVMPointerType::get(
        mlir::IntegerType::get(&lowerTy().getContext(), 8));
  }

  mlir::LLVM::ConstantOp
  genI32Constant(mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
                 int value) const {
    mlir::Type i32Ty = rewriter.getI32Type();
    mlir::IntegerAttr attr = rewriter.getI32IntegerAttr(value);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, i32Ty, attr);
  }

  mlir::LLVM::ConstantOp
  genConstantOffset(mlir::Location loc,
                    mlir::ConversionPatternRewriter &rewriter,
                    int offset) const {
    mlir::Type ity = lowerTy().offsetType();
    mlir::IntegerAttr cattr = rewriter.getI32IntegerAttr(offset);
    return rewriter.create<mlir::LLVM::ConstantOp>(loc, ity, cattr);
  }

  /// Perform an extension or truncation as needed on an integer value.
  /// Lowering to the specific target may involve some sign-extending or
  /// truncation of values, particularly to fit them from abstract box types to
  /// the appropriate reified structures.
  mlir::Value integerCast(mlir::Location loc,
                          mlir::ConversionPatternRewriter &rewriter,
                          mlir::Type ty, mlir::Value val) const {
    auto valTy = val.getType();
    // If the value was not yet lowered, lower its type so that it can
    // be used in getPrimitiveTypeSizeInBits.
    if (!valTy.isa<mlir::IntegerType>())
      valTy = convertType(valTy);
    auto toSize = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
    auto fromSize = mlir::LLVM::getPrimitiveTypeSizeInBits(valTy);
    if (toSize < fromSize)
      return rewriter.create<mlir::LLVM::TruncOp>(loc, ty, val);
    if (toSize > fromSize)
      return rewriter.create<mlir::LLVM::SExtOp>(loc, ty, val);
    return val;
  }

  /// Construct code sequence to extract the specific value from a `fir.box`.
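  /// For illustration only (the exact IR depends on the lowered descriptor
  /// type), reading the field at descriptor position `boxValue` roughly
  /// produces:
  ///   %p = llvm.getelementptr %box[%c0, %cValuePos] : ... -> !llvm.ptr<resultTy>
  ///   %v = llvm.load %p : !llvm.ptr<resultTy>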
  mlir::Value getValueFromBox(mlir::Location loc, mlir::Value box,
                              mlir::Type resultTy,
                              mlir::ConversionPatternRewriter &rewriter,
                              unsigned boxValue) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cValuePos =
        genConstantOffset(loc, rewriter, boxValue);
    auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
    auto p = rewriter.create<mlir::LLVM::GEPOp>(
        loc, pty, box, mlir::ValueRange{c0, cValuePos});
    return rewriter.create<mlir::LLVM::LoadOp>(loc, resultTy, p);
  }

  /// Method to construct code sequence to get the triple for dimension `dim`
  /// from a box.
  llvm::SmallVector<mlir::Value, 3>
  getDimsFromBox(mlir::Location loc, llvm::ArrayRef<mlir::Type> retTys,
                 mlir::Value box, mlir::Value dim,
                 mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cDims =
        genConstantOffset(loc, rewriter, kDimsPosInBox);
    mlir::LLVM::LoadOp l0 =
        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
    mlir::LLVM::LoadOp l1 =
        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
    mlir::LLVM::LoadOp l2 =
        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
    return {l0.getResult(), l1.getResult(), l2.getResult()};
  }

  mlir::LLVM::LoadOp
  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
                 mlir::Type ty,
                 mlir::ConversionPatternRewriter &rewriter) const {
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto idxTy = lowerTy().indexType();
    auto c0 = genConstantOffset(loc, rewriter, 0);
    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
    auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
                          rewriter);
  }

  /// Read base address from a fir.box. Returned address has type ty.
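  /// The address lives in the base_addr field of the CFI_cdesc_t-compatible
  /// descriptor (index kAddrPosInBox), so this is a GEP into that field
  /// followed by a load.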
  mlir::Value
  loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                      mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cAddr =
        genConstantOffset(loc, rewriter, kAddrPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  mlir::Value
  loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                         mlir::ConversionPatternRewriter &rewriter) const {
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    mlir::LLVM::ConstantOp cElemLen =
        genConstantOffset(loc, rewriter, kElemLenPosInBox);
    auto pty = mlir::LLVM::LLVMPointerType::get(ty);
    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
    return rewriter.create<mlir::LLVM::LoadOp>(loc, ty, p);
  }

  // Get the element type given an LLVM type that is of the form
  // [llvm.ptr](array|struct|vector)+ and the provided indexes.
  static mlir::Type getBoxEleTy(mlir::Type type,
                                llvm::ArrayRef<unsigned> indexes) {
    if (auto t = type.dyn_cast<mlir::LLVM::LLVMPointerType>())
      type = t.getElementType();
    for (auto i : indexes) {
      if (auto t = type.dyn_cast<mlir::LLVM::LLVMStructType>()) {
        assert(!t.isOpaque() && i < t.getBody().size());
        type = t.getBody()[i];
      } else if (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) {
        type = t.getElementType();
      } else if (auto t = type.dyn_cast<mlir::VectorType>()) {
        type = t.getElementType();
      } else {
        fir::emitFatalError(mlir::UnknownLoc::get(type.getContext()),
                            "request for invalid box element type");
      }
    }
    return type;
  }

  // Return LLVM type of the base address given the LLVM type
  // of the related descriptor (lowered fir.box type).
  static mlir::Type getBaseAddrTypeFromBox(mlir::Type type) {
    return getBoxEleTy(type, {kAddrPosInBox});
  }

  // Load the attribute from the \p box and perform a check against \p maskValue.
  // The final comparison is implemented as `(attribute & maskValue) != 0`.
  mlir::Value genBoxAttributeCheck(mlir::Location loc, mlir::Value box,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   unsigned maskValue) const {
    mlir::Type attrTy = rewriter.getI32Type();
    mlir::Value attribute =
        getValueFromBox(loc, box, attrTy, rewriter, kAttributePosInBox);
    mlir::LLVM::ConstantOp attrMask =
        genConstantOffset(loc, rewriter, maskValue);
    auto maskRes =
        rewriter.create<mlir::LLVM::AndOp>(loc, attrTy, attribute, attrMask);
    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
    return rewriter.create<mlir::LLVM::ICmpOp>(
        loc, mlir::LLVM::ICmpPredicate::ne, maskRes, c0);
  }

  template <typename... ARGS>
  mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                           mlir::ConversionPatternRewriter &rewriter,
                           mlir::Value base, ARGS... args) const {
    llvm::SmallVector<mlir::Value> cv = {args...};
    return rewriter.create<mlir::LLVM::GEPOp>(loc, ty, base, cv);
  }

  fir::LLVMTypeConverter &lowerTy() const {
    return *static_cast<fir::LLVMTypeConverter *>(this->getTypeConverter());
  }

  const fir::FIRToLLVMPassOptions &options;
};

/// FIR conversion pattern template
template <typename FromOp>
class FIROpAndTypeConversion : public FIROpConversion<FromOp> {
public:
  using FIROpConversion<FromOp>::FIROpConversion;
  using OpAdaptor = typename FromOp::Adaptor;

  mlir::LogicalResult
  matchAndRewrite(FromOp op, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const final {
    mlir::Type ty = this->convertType(op.getType());
    return doRewrite(op, ty, adaptor, rewriter);
  }

  virtual mlir::LogicalResult
  doRewrite(FromOp addr, mlir::Type ty, OpAdaptor adaptor,
            mlir::ConversionPatternRewriter &rewriter) const = 0;
};
} // namespace

namespace {
/// Lower `fir.address_of` operation to `llvm.address_of` operation.
struct AddrOfOpConversion : public FIROpConversion<fir::AddrOfOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AddrOfOp addr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(addr.getType());
    rewriter.replaceOpWithNewOp<mlir::LLVM::AddressOfOp>(
        addr, ty, addr.getSymbol().getRootReference().getValue());
    return mlir::success();
  }
};
} // namespace

/// Lookup the function to compute the memory size of this parametric derived
/// type. The size of the object may depend on the LEN type parameters of the
/// derived type.
static mlir::LLVM::LLVMFuncOp
getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
                          mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  std::string name = recTy.getName().str() + "P.mem.size";
  if (auto memSizeFunc = module.lookupSymbol<mlir::LLVM::LLVMFuncOp>(name))
    return memSizeFunc;
  TODO(op.getLoc(), "did not find allocation function");
}

// Compute the alloc scale size (constant factors encoded in the array type).
// We do this for arrays without a constant interior or arrays of characters
// with dynamic length, since those are the only ones that get decayed to a
// pointer to the element type.
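// For example (illustrative), an alloca of `!fir.array<2x?x5xi32>` with a
// shape operand for the unknown extent has constant factors 2 and 5, so this
// returns a constant 10 by which the dynamic element count is later scaled.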
template <typename OP>
static mlir::Value
genAllocationScaleSize(OP op, mlir::Type ity,
                       mlir::ConversionPatternRewriter &rewriter) {
  mlir::Location loc = op.getLoc();
  mlir::Type dataTy = op.getInType();
  mlir::Type scalarType = fir::unwrapSequenceType(dataTy);
  auto seqTy = dataTy.dyn_cast<fir::SequenceType>();
  if ((op.hasShapeOperands() && seqTy && !seqTy.hasConstantInterior()) ||
      (seqTy && fir::characterWithDynamicLen(scalarType))) {
    fir::SequenceType::Extent constSize = 1;
    for (auto extent : seqTy.getShape())
      if (extent != fir::SequenceType::getUnknownExtent())
        constSize *= extent;
    if (constSize != 1) {
      mlir::Value constVal{
          genConstantIndex(loc, ity, rewriter, constSize).getResult()};
      return constVal;
    }
  }
  return nullptr;
}

namespace {
/// convert to LLVM IR dialect `alloca`
struct AllocaOpConversion : public FIROpConversion<fir::AllocaOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocaOp alloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto loc = alloc.getLoc();
    mlir::Type ity = lowerTy().indexType();
    unsigned i = 0;
    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
    mlir::Type ty = convertType(alloc.getType());
    mlir::Type resultTy = ty;
    if (alloc.hasLenParams()) {
      unsigned end = alloc.numLenParams();
      llvm::SmallVector<mlir::Value> lenParams;
      for (; i < end; ++i)
        lenParams.push_back(operands[i]);
      mlir::Type scalarType = fir::unwrapSequenceType(alloc.getInType());
      if (auto chrTy = scalarType.dyn_cast<fir::CharacterType>()) {
        fir::CharacterType rawCharTy = fir::CharacterType::getUnknownLen(
            chrTy.getContext(), chrTy.getFKind());
        ty = mlir::LLVM::LLVMPointerType::get(convertType(rawCharTy));
        assert(end == 1);
        size = integerCast(loc, rewriter, ity, lenParams[0]);
      } else if (auto recTy = scalarType.dyn_cast<fir::RecordType>()) {
        mlir::LLVM::LLVMFuncOp memSizeFn =
            getDependentTypeMemSizeFn(recTy, alloc, rewriter);
        if (!memSizeFn)
          emitError(loc, "did not find allocation function");
        mlir::NamedAttribute attr = rewriter.getNamedAttr(
            "callee", mlir::SymbolRefAttr::get(memSizeFn));
        auto call = rewriter.create<mlir::LLVM::CallOp>(
            loc, ity, lenParams, llvm::ArrayRef<mlir::NamedAttribute>{attr});
        size = call.getResult(0);
        ty = ::getVoidPtrType(alloc.getContext());
      } else {
        return emitError(loc, "unexpected type ")
               << scalarType << " with type parameters";
      }
    }
    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    if (alloc.hasShapeOperands()) {
      unsigned end = operands.size();
      for (; i < end; ++i)
        size = rewriter.create<mlir::LLVM::MulOp>(
            loc, ity, size, integerCast(loc, rewriter, ity, operands[i]));
    }
    if (ty == resultTy) {
      // Do not emit the bitcast if ty and resultTy are the same.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AllocaOp>(alloc, ty, size,
                                                        alloc->getAttrs());
    } else {
      auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, ty, size,
                                                      alloc->getAttrs());
      rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(alloc, resultTy, al);
    }
    return mlir::success();
  }
};
} // namespace

/// Construct an `llvm.extractvalue` instruction. It will return the value at
/// element \p x from \p tuple.
static mlir::LLVM::ExtractValueOp
genExtractValueWithIndex(mlir::Location loc, mlir::Value tuple, mlir::Type ty,
                         mlir::ConversionPatternRewriter &rewriter,
                         mlir::MLIRContext *ctx, int x) {
  auto cx = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(x));
  auto xty = ty.cast<mlir::LLVM::LLVMStructType>().getBody()[x];
  return rewriter.create<mlir::LLVM::ExtractValueOp>(loc, xty, tuple, cx);
}

namespace {
/// Lower `fir.box_addr` to the sequence of operations to extract the first
/// element of the box.
struct BoxAddrOpConversion : public FIROpConversion<fir::BoxAddrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxAddrOp boxaddr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxaddr.getLoc();
    mlir::Type ty = convertType(boxaddr.getType());
    if (auto argty = boxaddr.getVal().getType().dyn_cast<fir::BoxType>()) {
      rewriter.replaceOp(boxaddr, loadBaseAddrFromBox(loc, ty, a, rewriter));
    } else {
      auto c0attr = rewriter.getI32IntegerAttr(0);
      auto c0 = mlir::ArrayAttr::get(boxaddr.getContext(), c0attr);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>(boxaddr, ty, a,
                                                              c0);
    }
    return mlir::success();
  }
};

/// Convert `!fir.boxchar_len` to `!llvm.extractvalue` for the 2nd part of the
/// boxchar.
struct BoxCharLenOpConversion : public FIROpConversion<fir::BoxCharLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxCharLenOp boxCharLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value boxChar = adaptor.getOperands()[0];
    mlir::Location loc = boxChar.getLoc();
    mlir::MLIRContext *ctx = boxChar.getContext();
    mlir::Type returnValTy = boxCharLen.getResult().getType();

    constexpr int boxcharLenIdx = 1;
    mlir::LLVM::ExtractValueOp len = genExtractValueWithIndex(
        loc, boxChar, boxChar.getType(), rewriter, ctx, boxcharLenIdx);
    mlir::Value lenAfterCast = integerCast(loc, rewriter, returnValTy, len);
    rewriter.replaceOp(boxCharLen, lenAfterCast);

    return mlir::success();
  }
};

/// Lower `fir.box_dims` to a sequence of operations to extract the requested
/// dimension information from the boxed value.
/// Results in a triple set of GEPs and loads.
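/// The three results (lower bound, extent, stride) are each read from the
/// dims[dim] entry of the descriptor with a GEP plus a load; see
/// getDimsFromBox above.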
struct BoxDimsOpConversion : public FIROpConversion<fir::BoxDimsOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxDimsOp boxdims, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type, 3> resultTypes = {
        convertType(boxdims.getResult(0).getType()),
        convertType(boxdims.getResult(1).getType()),
        convertType(boxdims.getResult(2).getType()),
    };
    auto results =
        getDimsFromBox(boxdims.getLoc(), resultTypes, adaptor.getOperands()[0],
                       adaptor.getOperands()[1], rewriter);
    rewriter.replaceOp(boxdims, results);
    return mlir::success();
  }
};

/// Lower `fir.box_elesize` to a sequence of operations to extract the size of
/// an element in the boxed value.
struct BoxEleSizeOpConversion : public FIROpConversion<fir::BoxEleSizeOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxEleSizeOp boxelesz, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxelesz.getLoc();
    auto ty = convertType(boxelesz.getType());
    auto elemSize = getValueFromBox(loc, a, ty, rewriter, kElemLenPosInBox);
    rewriter.replaceOp(boxelesz, elemSize);
    return mlir::success();
  }
};

/// Lower `fir.box_isalloc` to a sequence of operations to determine if the
/// boxed value was from an ALLOCATABLE entity.
struct BoxIsAllocOpConversion : public FIROpConversion<fir::BoxIsAllocOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsAllocOp boxisalloc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisalloc.getLoc();
    mlir::Value check =
        genBoxAttributeCheck(loc, box, rewriter, kAttrAllocatable);
    rewriter.replaceOp(boxisalloc, check);
    return mlir::success();
  }
};

/// Lower `fir.box_isarray` to a sequence of operations to determine if the
/// boxed value is an array.
struct BoxIsArrayOpConversion : public FIROpConversion<fir::BoxIsArrayOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsArrayOp boxisarray, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxisarray.getLoc();
    auto rank =
        getValueFromBox(loc, a, rewriter.getI32Type(), rewriter, kRankPosInBox);
    auto c0 = genConstantOffset(loc, rewriter, 0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
        boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
    return mlir::success();
  }
};

/// Lower `fir.box_isptr` to a sequence of operations to determine if the
/// boxed value was from a POINTER entity.
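/// Implemented via genBoxAttributeCheck, i.e. the generated code roughly
/// computes `(descriptor.attribute & CFI_attribute_pointer) != 0`.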
struct BoxIsPtrOpConversion : public FIROpConversion<fir::BoxIsPtrOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxIsPtrOp boxisptr, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxisptr.getLoc();
    mlir::Value check = genBoxAttributeCheck(loc, box, rewriter, kAttrPointer);
    rewriter.replaceOp(boxisptr, check);
    return mlir::success();
  }
};

/// Lower `fir.box_rank` to the sequence of operations to extract the rank from
/// the box.
struct BoxRankOpConversion : public FIROpConversion<fir::BoxRankOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxRankOp boxrank, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value a = adaptor.getOperands()[0];
    auto loc = boxrank.getLoc();
    mlir::Type ty = convertType(boxrank.getType());
    auto result = getValueFromBox(loc, a, ty, rewriter, kRankPosInBox);
    rewriter.replaceOp(boxrank, result);
    return mlir::success();
  }
};

/// Lower `fir.boxproc_host` operation. Extracts the host pointer from the
/// boxproc.
/// TODO: Part of supporting Fortran 2003 procedure pointers.
struct BoxProcHostOpConversion : public FIROpConversion<fir::BoxProcHostOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxProcHostOp boxprochost, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(boxprochost.getLoc(), "fir.boxproc_host codegen");
    return mlir::failure();
  }
};

/// Lower `fir.box_tdesc` to the sequence of operations to extract the type
/// descriptor from the box.
struct BoxTypeDescOpConversion : public FIROpConversion<fir::BoxTypeDescOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::BoxTypeDescOp boxtypedesc, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Value box = adaptor.getOperands()[0];
    auto loc = boxtypedesc.getLoc();
    mlir::Type typeTy =
        fir::getDescFieldTypeModel<kTypePosInBox>()(boxtypedesc.getContext());
    auto result = getValueFromBox(loc, box, typeTy, rewriter, kTypePosInBox);
    auto typePtrTy = mlir::LLVM::LLVMPointerType::get(typeTy);
    rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(boxtypedesc, typePtrTy,
                                                        result);
    return mlir::success();
  }
};

/// Lower `fir.string_lit` to LLVM IR dialect operation.
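/// A string attribute or dense-elements attribute becomes a single
/// llvm.mlir.constant; an array attribute becomes an llvm.mlir.undef followed
/// by one llvm.insertvalue per character, each value truncated or extended to
/// the character kind's bit width.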
struct StringLitOpConversion : public FIROpConversion<fir::StringLitOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::StringLitOp constop, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto ty = convertType(constop.getType());
    auto attr = constop.getValue();
    if (attr.isa<mlir::StringAttr>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(constop, ty, attr);
      return mlir::success();
    }

    auto charTy = constop.getType().cast<fir::CharacterType>();
    unsigned bits = lowerTy().characterBitsize(charTy);
    mlir::Type intTy = rewriter.getIntegerType(bits);
    mlir::Location loc = constop.getLoc();
    mlir::Value cst = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    if (auto arr = attr.dyn_cast<mlir::DenseElementsAttr>()) {
      cst = rewriter.create<mlir::LLVM::ConstantOp>(loc, ty, arr);
    } else if (auto arr = attr.dyn_cast<mlir::ArrayAttr>()) {
      for (auto a : llvm::enumerate(arr.getValue())) {
        // convert each character to a precise bitsize
        auto elemAttr = mlir::IntegerAttr::get(
            intTy,
            a.value().cast<mlir::IntegerAttr>().getValue().zextOrTrunc(bits));
        auto elemCst =
            rewriter.create<mlir::LLVM::ConstantOp>(loc, intTy, elemAttr);
        auto index = mlir::ArrayAttr::get(
            constop.getContext(), rewriter.getI32IntegerAttr(a.index()));
        cst = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, cst, elemCst,
                                                         index);
      }
    } else {
      return mlir::failure();
    }
    rewriter.replaceOp(constop, cst);
    return mlir::success();
  }
};

/// `fir.call` -> `llvm.call`
struct CallOpConversion : public FIROpConversion<fir::CallOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CallOp call, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    llvm::SmallVector<mlir::Type> resultTys;
    for (auto r : call.getResults())
      resultTys.push_back(convertType(r.getType()));
    rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>(
        call, resultTys, adaptor.getOperands(), call->getAttrs());
    return mlir::success();
  }
};
} // namespace

static mlir::Type getComplexEleTy(mlir::Type complex) {
  if (auto cc = complex.dyn_cast<mlir::ComplexType>())
    return cc.getElementType();
  return complex.cast<fir::ComplexType>().getElementType();
}

namespace {
/// Compare complex values
///
/// Per 10.1, the only comparisons available are .EQ. (oeq) and .NE. (une).
///
/// For completeness, all other comparisons are done on the real component only.
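/// Sketch of the lowering (not exact IR): the real and imaginary parts are
/// extracted with llvm.extractvalue, compared with two llvm.fcmp operations,
/// and the results are combined with llvm.and (.EQ.) or llvm.or (.NE.).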
struct CmpcOpConversion : public FIROpConversion<fir::CmpcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::CmpcOp cmp, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    mlir::MLIRContext *ctxt = cmp.getContext();
    mlir::Type eleTy = convertType(getComplexEleTy(cmp.getLhs().getType()));
    mlir::Type resTy = convertType(cmp.getType());
    mlir::Location loc = cmp.getLoc();
    auto pos0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0));
    llvm::SmallVector<mlir::Value, 2> rp = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos0),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos0)};
    auto rcp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, rp, cmp->getAttrs());
    auto pos1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1));
    llvm::SmallVector<mlir::Value, 2> ip = {
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[0],
                                                    pos1),
        rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, operands[1],
                                                    pos1)};
    auto icp =
        rewriter.create<mlir::LLVM::FCmpOp>(loc, resTy, ip, cmp->getAttrs());
    llvm::SmallVector<mlir::Value, 2> cp = {rcp, icp};
    switch (cmp.getPredicate()) {
    case mlir::arith::CmpFPredicate::OEQ: // .EQ.
      rewriter.replaceOpWithNewOp<mlir::LLVM::AndOp>(cmp, resTy, cp);
      break;
    case mlir::arith::CmpFPredicate::UNE: // .NE.
      rewriter.replaceOpWithNewOp<mlir::LLVM::OrOp>(cmp, resTy, cp);
      break;
    default:
      rewriter.replaceOp(cmp, rcp.getResult());
      break;
    }
    return mlir::success();
  }
};

/// Lower complex constants
struct ConstcOpConversion : public FIROpConversion<fir::ConstcOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::ConstcOp conc, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Location loc = conc.getLoc();
    mlir::MLIRContext *ctx = conc.getContext();
    mlir::Type ty = convertType(conc.getType());
    mlir::Type ety = convertType(getComplexEleTy(conc.getType()));
    auto realFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getReal()));
    auto realPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, realFloatAttr);
    auto imFloatAttr = mlir::FloatAttr::get(ety, getValue(conc.getImaginary()));
    auto imPart =
        rewriter.create<mlir::LLVM::ConstantOp>(loc, ety, imFloatAttr);
    auto realIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto imIndex = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto undef = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
    auto setReal = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, ty, undef, realPart, realIndex);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(conc, ty, setReal,
                                                           imPart, imIndex);
    return mlir::success();
  }

  inline llvm::APFloat getValue(mlir::Attribute attr) const {
    return attr.cast<fir::RealAttr>().getValue();
  }
};

/// convert value of from-type to value of to-type
struct ConvertOpConversion : public FIROpConversion<fir::ConvertOp> {
  using FIROpConversion::FIROpConversion;

  static bool isFloatingPointTy(mlir::Type ty) {
    return ty.isa<mlir::FloatType>();
  }

  mlir::LogicalResult
  matchAndRewrite(fir::ConvertOp convert, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    auto fromFirTy = convert.getValue().getType();
    auto toFirTy = convert.getRes().getType();
    auto fromTy = convertType(fromFirTy);
    auto toTy = convertType(toFirTy);
    mlir::Value op0 = adaptor.getOperands()[0];
    if (fromTy == toTy) {
      rewriter.replaceOp(convert, op0);
      return mlir::success();
    }
    auto loc = convert.getLoc();
    auto convertFpToFp = [&](mlir::Value val, unsigned fromBits,
                             unsigned toBits, mlir::Type toTy) -> mlir::Value {
      if (fromBits == toBits) {
        // TODO: Converting between two floating-point representations with the
        // same bitwidth is not allowed for now.
        mlir::emitError(loc,
                        "cannot implicitly convert between two floating-point "
                        "representations of the same bitwidth");
        return {};
      }
      if (fromBits > toBits)
        return rewriter.create<mlir::LLVM::FPTruncOp>(loc, toTy, val);
      return rewriter.create<mlir::LLVM::FPExtOp>(loc, toTy, val);
    };
    // Complex to complex conversion.
    if (fir::isa_complex(fromFirTy) && fir::isa_complex(toFirTy)) {
      // Special case: handle the conversion of a complex such that both the
      // real and imaginary parts are converted together.
      auto zero = mlir::ArrayAttr::get(convert.getContext(),
                                       rewriter.getI32IntegerAttr(0));
      auto one = mlir::ArrayAttr::get(convert.getContext(),
                                      rewriter.getI32IntegerAttr(1));
      auto ty = convertType(getComplexEleTy(convert.getValue().getType()));
      auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, zero);
      auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, op0, one);
      auto nt = convertType(getComplexEleTy(convert.getRes().getType()));
      auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(ty);
      auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(nt);
      auto rc = convertFpToFp(rp, fromBits, toBits, nt);
      auto ic = convertFpToFp(ip, fromBits, toBits, nt);
      auto un = rewriter.create<mlir::LLVM::UndefOp>(loc, toTy);
      auto i1 =
          rewriter.create<mlir::LLVM::InsertValueOp>(loc, toTy, un, rc, zero);
      rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(convert, toTy, i1,
                                                             ic, one);
      return mlir::success();
    }

    // Follow UNIX F77 convention for logicals:
    // 1. underlying integer is not zero => logical is .TRUE.
    // 2. logical is .TRUE. => set underlying integer to 1.
    auto i1Type = mlir::IntegerType::get(convert.getContext(), 1);
    if (fromFirTy.isa<fir::LogicalType>() && toFirTy == i1Type) {
      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
      rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
          convert, mlir::LLVM::ICmpPredicate::ne, op0, zero);
      return mlir::success();
    }
    if (fromFirTy == i1Type && toFirTy.isa<fir::LogicalType>()) {
      rewriter.replaceOpWithNewOp<mlir::LLVM::ZExtOp>(convert, toTy, op0);
      return mlir::success();
    }

    // Floating point to floating point conversion.
    if (isFloatingPointTy(fromTy)) {
      if (isFloatingPointTy(toTy)) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        auto v = convertFpToFp(op0, fromBits, toBits, toTy);
        rewriter.replaceOp(convert, v);
        return mlir::success();
      }
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::FPToSIOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::IntegerType>()) {
      // Integer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        auto fromBits = mlir::LLVM::getPrimitiveTypeSizeInBits(fromTy);
        auto toBits = mlir::LLVM::getPrimitiveTypeSizeInBits(toTy);
        assert(fromBits != toBits);
        if (fromBits > toBits) {
          rewriter.replaceOpWithNewOp<mlir::LLVM::TruncOp>(convert, toTy, op0);
          return mlir::success();
        }
        rewriter.replaceOpWithNewOp<mlir::LLVM::SExtOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to floating point conversion.
      if (isFloatingPointTy(toTy)) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::SIToFPOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Integer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::IntToPtrOp>(convert, toTy, op0);
        return mlir::success();
      }
    } else if (fromTy.isa<mlir::LLVM::LLVMPointerType>()) {
      // Pointer to integer conversion.
      if (toTy.isa<mlir::IntegerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::PtrToIntOp>(convert, toTy, op0);
        return mlir::success();
      }
      // Pointer to pointer conversion.
      if (toTy.isa<mlir::LLVM::LLVMPointerType>()) {
        rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(convert, toTy, op0);
        return mlir::success();
      }
    }
    return emitError(loc) << "cannot convert " << fromTy << " to " << toTy;
  }
};

/// Lower `fir.dispatch` operation. A virtual call to a method in a dispatch
/// table.
struct DispatchOpConversion : public FIROpConversion<fir::DispatchOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchOp dispatch, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispatch.getLoc(), "fir.dispatch codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dispatch_table` operation. The dispatch table for a Fortran
/// derived type.
struct DispatchTableOpConversion
    : public FIROpConversion<fir::DispatchTableOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DispatchTableOp dispTab, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dispTab.getLoc(), "fir.dispatch_table codegen");
    return mlir::failure();
  }
};

/// Lower `fir.dt_entry` operation. An entry in a dispatch table; binds a
/// method-name to a function.
struct DTEntryOpConversion : public FIROpConversion<fir::DTEntryOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::DTEntryOp dtEnt, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(dtEnt.getLoc(), "fir.dt_entry codegen");
    return mlir::failure();
  }
};

/// Lower `fir.global_len` operation.
struct GlobalLenOpConversion : public FIROpConversion<fir::GlobalLenOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::GlobalLenOp globalLen, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(globalLen.getLoc(), "fir.global_len codegen");
    return mlir::failure();
  }
};

/// Lower fir.len_param_index
struct LenParamIndexOpConversion
    : public FIROpConversion<fir::LenParamIndexOp> {
  using FIROpConversion::FIROpConversion;

  // FIXME: this should be specialized by the runtime target
  mlir::LogicalResult
  matchAndRewrite(fir::LenParamIndexOp lenp, OpAdaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    TODO(lenp.getLoc(), "fir.len_param_index codegen");
  }
};

/// Convert `!fir.emboxchar<!fir.char<KIND, ?>, #n>` into a sequence of
/// instructions that generate `!llvm.struct<(ptr<ik>, i64)>`. The 1st element
/// in this struct is a pointer. Its type is determined from `KIND`. The 2nd
/// element is the length of the character buffer (`#n`).
struct EmboxCharOpConversion : public FIROpConversion<fir::EmboxCharOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::EmboxCharOp emboxChar, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::ValueRange operands = adaptor.getOperands();
    auto *ctx = emboxChar.getContext();

    mlir::Value charBuffer = operands[0];
    mlir::Value charBufferLen = operands[1];

    mlir::Location loc = emboxChar.getLoc();
    mlir::Type llvmStructTy = convertType(emboxChar.getType());
    auto llvmStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, llvmStructTy);

    mlir::Type lenTy =
        llvmStructTy.cast<mlir::LLVM::LLVMStructType>().getBody()[1];
    mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, charBufferLen);

    auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0));
    auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1));
    auto insertBufferOp = rewriter.create<mlir::LLVM::InsertValueOp>(
        loc, llvmStructTy, llvmStruct, charBuffer, c0);
    rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
        emboxChar, llvmStructTy, insertBufferOp, lenAfterCast, c1);

    return mlir::success();
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard malloc call.
static mlir::LLVM::LLVMFuncOp
getMalloc(fir::AllocMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp mallocFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("malloc"))
    return mallocFunc;
  mlir::OpBuilder moduleBuilder(
      op->getParentOfType<mlir::ModuleOp>().getBodyRegion());
  auto indexType = mlir::IntegerType::get(op.getContext(), 64);
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "malloc",
      mlir::LLVM::LLVMFunctionType::get(getVoidPtrType(op.getContext()),
                                        indexType,
                                        /*isVarArg=*/false));
}

/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
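/// This is the classic "GEP from a null pointer" idiom; roughly (illustrative
/// types, the index type comes from the type converter):
///   %0 = llvm.mlir.null : !llvm.ptr<T>
///   %1 = llvm.getelementptr %0[%c1] : (!llvm.ptr<T>, i64) -> !llvm.ptr<T>
///   %2 = llvm.ptrtoint %1 : !llvm.ptr<T> to i64
/// so %2 holds sizeof(T) in bytes.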
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                       mlir::ConversionPatternRewriter &rewriter) {
  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
  llvm::SmallVector<mlir::Value> args = {one};
  auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr, args);
  return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}

namespace {
/// Lower a `fir.allocmem` instruction into `llvm.call @malloc`
struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::Type heapTy = heap.getType();
    mlir::Type ty = convertType(heapTy);
    mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
    mlir::Location loc = heap.getLoc();
    auto ity = lowerTy().indexType();
    mlir::Type dataTy = fir::unwrapRefType(heapTy);
    if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
      TODO(loc, "fir.allocmem codegen of derived type with length parameters");
    mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
    for (mlir::Value opnd : adaptor.getOperands())
      size = rewriter.create<mlir::LLVM::MulOp>(
          loc, ity, size, integerCast(loc, rewriter, ity, opnd));
    heap->setAttr("callee", mlir::SymbolRefAttr::get(mallocFunc));
    auto malloc = rewriter.create<mlir::LLVM::CallOp>(
        loc, ::getVoidPtrType(heap.getContext()), size, heap->getAttrs());
    rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(heap, ty,
                                                       malloc.getResult(0));
    return mlir::success();
  }

  // Compute the (allocation) size of the allocmem type in bytes.
  mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                 mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Type llTy) const {
    // Use the primitive size, if available.
    auto ptrTy = llTy.dyn_cast<mlir::LLVM::LLVMPointerType>();
    if (auto size =
            mlir::LLVM::getPrimitiveTypeSizeInBits(ptrTy.getElementType()))
      return genConstantIndex(loc, idxTy, rewriter, size / 8);

    // Otherwise, generate the GEP trick in LLVM IR to compute the size.
    return computeDerivedTypeSize(loc, ptrTy, idxTy, rewriter);
  }
};
} // namespace

/// Return the LLVMFuncOp corresponding to the standard free call.
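/// If `free` is not yet declared in the module, it is declared here with a
/// void result and a single i8* parameter (no varargs).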
static mlir::LLVM::LLVMFuncOp
getFree(fir::FreeMemOp op, mlir::ConversionPatternRewriter &rewriter) {
  auto module = op->getParentOfType<mlir::ModuleOp>();
  if (mlir::LLVM::LLVMFuncOp freeFunc =
          module.lookupSymbol<mlir::LLVM::LLVMFuncOp>("free"))
    return freeFunc;
  mlir::OpBuilder moduleBuilder(module.getBodyRegion());
  auto voidType = mlir::LLVM::LLVMVoidType::get(op.getContext());
  return moduleBuilder.create<mlir::LLVM::LLVMFuncOp>(
      rewriter.getUnknownLoc(), "free",
      mlir::LLVM::LLVMFunctionType::get(voidType,
                                        getVoidPtrType(op.getContext()),
                                        /*isVarArg=*/false));
}

namespace {
/// Lower a `fir.freemem` instruction into `llvm.call @free`
struct FreeMemOpConversion : public FIROpConversion<fir::FreeMemOp> {
  using FIROpConversion::FIROpConversion;

  mlir::LogicalResult
  matchAndRewrite(fir::FreeMemOp freemem, OpAdaptor adaptor,
                  mlir::ConversionPatternRewriter &rewriter) const override {
    mlir::LLVM::LLVMFuncOp freeFunc = getFree(freemem, rewriter);
    mlir::Location loc = freemem.getLoc();
    auto bitcast = rewriter.create<mlir::LLVM::BitcastOp>(
        freemem.getLoc(), voidPtrTy(), adaptor.getOperands()[0]);
    freemem->setAttr("callee", mlir::SymbolRefAttr::get(freeFunc));
    rewriter.create<mlir::LLVM::CallOp>(
        loc, mlir::TypeRange{}, mlir::ValueRange{bitcast}, freemem->getAttrs());
    rewriter.eraseOp(freemem);
    return mlir::success();
  }
};
} // namespace

/// Common base class for embox to descriptor conversion.
template <typename OP>
struct EmboxCommonConversion : public FIROpConversion<OP> {
  using FIROpConversion<OP>::FIROpConversion;

  // Find the LLVMFuncOp in whose entry block the alloca should be inserted.
  // The order to find the LLVMFuncOp is as follows:
  // 1. The parent operation of the current block if it is an LLVMFuncOp.
  // 2. The first ancestor that is an LLVMFuncOp.
  mlir::LLVM::LLVMFuncOp
  getFuncForAllocaInsert(mlir::ConversionPatternRewriter &rewriter) const {
    mlir::Operation *parentOp = rewriter.getInsertionBlock()->getParentOp();
    return mlir::isa<mlir::LLVM::LLVMFuncOp>(parentOp)
               ? mlir::cast<mlir::LLVM::LLVMFuncOp>(parentOp)
               : parentOp->getParentOfType<mlir::LLVM::LLVMFuncOp>();
  }

  // Generate an alloca of size 1 and type \p toTy.
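  // The alloca is emitted at the start of the enclosing LLVMFuncOp's entry
  // block (see getFuncForAllocaInsert) so it is not re-executed inside loops;
  // the current insertion point is saved and restored around it.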
  mlir::LLVM::AllocaOp
  genAllocaWithType(mlir::Location loc, mlir::Type toTy, unsigned alignment,
                    mlir::ConversionPatternRewriter &rewriter) const {
    auto thisPt = rewriter.saveInsertionPoint();
    mlir::LLVM::LLVMFuncOp func = getFuncForAllocaInsert(rewriter);
    rewriter.setInsertionPointToStart(&func.front());
    auto size = this->genI32Constant(loc, rewriter, 1);
    auto al = rewriter.create<mlir::LLVM::AllocaOp>(loc, toTy, size, alignment);
    rewriter.restoreInsertionPoint(thisPt);
    return al;
  }

  static int getCFIAttr(fir::BoxType boxTy) {
    auto eleTy = boxTy.getEleTy();
    if (eleTy.isa<fir::PointerType>())
      return CFI_attribute_pointer;
    if (eleTy.isa<fir::HeapType>())
      return CFI_attribute_allocatable;
    return CFI_attribute_other;
  }

  static fir::RecordType unwrapIfDerived(fir::BoxType boxTy) {
    return fir::unwrapSequenceType(fir::dyn_cast_ptrOrBoxEleTy(boxTy))
        .template dyn_cast<fir::RecordType>();
  }
  static bool isDerivedTypeWithLenParams(fir::BoxType boxTy) {
    auto recTy = unwrapIfDerived(boxTy);
    return recTy && recTy.getNumLenParams() > 0;
  }
  static bool isDerivedType(fir::BoxType boxTy) {
    return static_cast<bool>(unwrapIfDerived(boxTy));
  }

  // Get the element size and CFI type code of the boxed value.
  std::tuple<mlir::Value, mlir::Value> getSizeAndTypeCode(
      mlir::Location loc, mlir::ConversionPatternRewriter &rewriter,
      mlir::Type boxEleTy, mlir::ValueRange lenParams = {}) const {
    auto doInteger =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::integerBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doLogical =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::logicalBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doFloat = [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      int typeCode = fir::realBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doComplex =
        [&](unsigned width) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::complexBitsToTypeCode(width);
      return {this->genConstantOffset(loc, rewriter, width / 8 * 2),
              this->genConstantOffset(loc, rewriter, typeCode)};
    };
    auto doCharacter =
        [&](unsigned width,
            mlir::Value len) -> std::tuple<mlir::Value, mlir::Value> {
      auto typeCode = fir::characterBitsToTypeCode(width);
      auto typeCodeVal = this->genConstantOffset(loc, rewriter, typeCode);
      if (width == 8)
        return {len, typeCodeVal};
      auto byteWidth = this->genConstantOffset(loc, rewriter, width / 8);
      auto i64Ty = mlir::IntegerType::get(&this->lowerTy().getContext(), 64);
      auto size =
          rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, byteWidth, len);
      return {size, typeCodeVal};
    };
    auto getKindMap = [&]() -> fir::KindMapping & {
      return this->lowerTy().getKindMap();
    };
    // Pointer-like types.
    if (auto eleTy = fir::dyn_cast_ptrEleTy(boxEleTy))
      boxEleTy = eleTy;
    // Integer types.
    if (fir::isa_integer(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::IntegerType>())
        return doInteger(ty.getWidth());
      auto ty = boxEleTy.cast<fir::IntegerType>();
      return doInteger(getKindMap().getIntegerBitsize(ty.getFKind()));
    }
    // Floating point types.
    if (fir::isa_real(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::FloatType>())
        return doFloat(ty.getWidth());
      auto ty = boxEleTy.cast<fir::RealType>();
      return doFloat(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Complex types.
    if (fir::isa_complex(boxEleTy)) {
      if (auto ty = boxEleTy.dyn_cast<mlir::ComplexType>())
        return doComplex(
            ty.getElementType().cast<mlir::FloatType>().getWidth());
      auto ty = boxEleTy.cast<fir::ComplexType>();
      return doComplex(getKindMap().getRealBitsize(ty.getFKind()));
    }
    // Character types.
    if (auto ty = boxEleTy.dyn_cast<fir::CharacterType>()) {
      auto charWidth = getKindMap().getCharacterBitsize(ty.getFKind());
      if (ty.getLen() != fir::CharacterType::unknownLen()) {
        auto len = this->genConstantOffset(loc, rewriter, ty.getLen());
        return doCharacter(charWidth, len);
      }
      assert(!lenParams.empty());
      return doCharacter(charWidth, lenParams.back());
    }
    // Logical type.
    if (auto ty = boxEleTy.dyn_cast<fir::LogicalType>())
      return doLogical(getKindMap().getLogicalBitsize(ty.getFKind()));
    // Array types.
    if (auto seqTy = boxEleTy.dyn_cast<fir::SequenceType>())
      return getSizeAndTypeCode(loc, rewriter, seqTy.getEleTy(), lenParams);
    // Derived-type types.
    if (boxEleTy.isa<fir::RecordType>()) {
      auto ptrTy = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(boxEleTy));
      auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
      auto one =
          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
      auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, nullPtr,
                                                    mlir::ValueRange{one});
      auto eleSize = rewriter.create<mlir::LLVM::PtrToIntOp>(
          loc, this->lowerTy().indexType(), gep);
      return {eleSize,
              this->genConstantOffset(loc, rewriter, fir::derivedToTypeCode())};
    }
    // Reference type.
    if (fir::isa_ref_type(boxEleTy)) {
      // FIXME: use the target pointer size rather than sizeof(void*)
      return {this->genConstantOffset(loc, rewriter, sizeof(void *)),
              this->genConstantOffset(loc, rewriter, CFI_type_cptr)};
    }
    fir::emitFatalError(loc, "unhandled type in fir.box code generation");
  }

  /// Basic pattern to write a field in the descriptor
  mlir::Value insertField(mlir::ConversionPatternRewriter &rewriter,
                          mlir::Location loc, mlir::Value dest,
                          llvm::ArrayRef<unsigned> fldIndexes,
                          mlir::Value value, bool bitcast = false) const {
    auto boxTy = dest.getType();
    auto fldTy = this->getBoxEleTy(boxTy, fldIndexes);
    if (bitcast)
      value = rewriter.create<mlir::LLVM::BitcastOp>(loc, fldTy, value);
    else
      value = this->integerCast(loc, rewriter, fldTy, value);
    llvm::SmallVector<mlir::Attribute, 2> attrs;
    for (auto i : fldIndexes)
      attrs.push_back(rewriter.getI32IntegerAttr(i));
    auto indexesAttr = mlir::ArrayAttr::get(rewriter.getContext(), attrs);
    return rewriter.create<mlir::LLVM::InsertValueOp>(loc, boxTy, dest, value,
                                                      indexesAttr);
  }

  inline mlir::Value
  insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, mlir::Value dest,
                    mlir::Value base) const {
    return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
                       /*bitCast=*/true);
  }

  inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
                                      mlir::Location loc, mlir::Value dest,
                                      unsigned dim, mlir::Value lb) const {
    return insertField(rewriter, loc, dest,
                       {kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
  }

  inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value extent) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
                       extent);
  }

  inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Location loc, mlir::Value dest,
                                  unsigned dim, mlir::Value stride) const {
    return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
                       stride);
  }

  /// Get the address of the type descriptor global variable that was created by
  /// lowering for derived type \p recType.
  template <typename BOX>
  mlir::Value
  getTypeDescriptor(BOX box, mlir::ConversionPatternRewriter &rewriter,
                    mlir::Location loc, fir::RecordType recType) const {
    std::string name =
        fir::NameUniquer::getTypeDescriptorName(recType.getName());
    auto module = box->template getParentOfType<mlir::ModuleOp>();
    if (auto global = module.template lookupSymbol<fir::GlobalOp>(name)) {
      auto ty = mlir::LLVM::LLVMPointerType::get(
          this->lowerTy().convertType(global.getType()));
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    if (auto global =
            module.template lookupSymbol<mlir::LLVM::GlobalOp>(name)) {
      // The global may have already been translated to LLVM.
      auto ty = mlir::LLVM::LLVMPointerType::get(global.getType());
      return rewriter.create<mlir::LLVM::AddressOfOp>(loc, ty,
                                                      global.getSymName());
    }
    // Type info derived types do not have type descriptors since they are the
    // types defining type descriptors.
    if (!this->options.ignoreMissingTypeDescriptors &&
        !fir::NameUniquer::belongsToModule(
            name, Fortran::semantics::typeInfoBuiltinModule))
      fir::emitFatalError(
          loc, "runtime derived type info descriptor was not generated");
    return rewriter.create<mlir::LLVM::NullOp>(
        loc, ::getVoidPtrType(box.getContext()));
  }

  template <typename BOX>
  std::tuple<fir::BoxType, mlir::Value, mlir::Value>
  consDescriptorPrefix(BOX box, mlir::ConversionPatternRewriter &rewriter,
                       unsigned rank, mlir::ValueRange lenParams) const {
    auto loc = box.getLoc();
    auto boxTy = box.getType().template dyn_cast<fir::BoxType>();
    auto convTy = this->lowerTy().convertBoxType(boxTy, rank);
    auto llvmBoxPtrTy = convTy.template cast<mlir::LLVM::LLVMPointerType>();
    auto llvmBoxTy = llvmBoxPtrTy.getElementType();
    mlir::Value descriptor =
        rewriter.create<mlir::LLVM::UndefOp>(loc, llvmBoxTy);

    llvm::SmallVector<mlir::Value> typeparams = lenParams;
    if constexpr (!std::is_same_v<BOX, fir::EmboxOp>) {
      if (!box.substr().empty() && fir::hasDynamicSize(boxTy.getEleTy()))
        typeparams.push_back(box.substr()[1]);
    }

    // Write each of the fields with the appropriate values
    auto [eleSize, cfiTy] =
        getSizeAndTypeCode(loc, rewriter, boxTy.getEleTy(), typeparams);
    descriptor =
        insertField(rewriter, loc, descriptor, {kElemLenPosInBox}, eleSize);
    descriptor = insertField(rewriter, loc, descriptor, {kVersionPosInBox},
                             this->genI32Constant(loc, rewriter, CFI_VERSION));
    descriptor = insertField(rewriter, loc, descriptor, {kRankPosInBox},
                             this->genI32Constant(loc, rewriter, rank));
    descriptor = insertField(rewriter, loc, descriptor, {kTypePosInBox}, cfiTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kAttributePosInBox},
                    this->genI32Constant(loc, rewriter, getCFIAttr(boxTy)));
    const bool hasAddendum = isDerivedType(boxTy);
    descriptor =
        insertField(rewriter, loc, descriptor, {kF18AddendumPosInBox},
                    this->genI32Constant(loc, rewriter, hasAddendum ? 1 : 0));

    if (hasAddendum) {
      auto isArray =
          fir::dyn_cast_ptrOrBoxEleTy(boxTy).template isa<fir::SequenceType>();
      unsigned typeDescFieldId = isArray ? kOptTypePtrPosInBox : kDimsPosInBox;
      auto typeDesc =
          getTypeDescriptor(box, rewriter, loc, unwrapIfDerived(boxTy));
      descriptor =
          insertField(rewriter, loc, descriptor, {typeDescFieldId}, typeDesc,
                      /*bitCast=*/true);
    }

    return {boxTy, descriptor, eleSize};
  }

  /// Compute the base address of a substring given the base address of a scalar
  /// string and the zero-based string lower bound.
  mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Location loc, mlir::Value base,
                                 mlir::Value lowerBound) const {
    llvm::SmallVector<mlir::Value> gepOperands;
    auto baseType =
        base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
    if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
      auto idxTy = this->lowerTy().indexType();
      mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
      gepOperands.push_back(zero);
    }
    gepOperands.push_back(lowerBound);
    return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
  }

  /// If the embox is not in a globalOp body, allocate storage for the box;
  /// store the value inside and return the generated alloca. Return the input
  /// value otherwise.
Return the input 1394 /// value otherwise. 1395 mlir::Value 1396 placeInMemoryIfNotGlobalInit(mlir::ConversionPatternRewriter &rewriter, 1397 mlir::Location loc, mlir::Value boxValue) const { 1398 auto *thisBlock = rewriter.getInsertionBlock(); 1399 if (thisBlock && mlir::isa<mlir::LLVM::GlobalOp>(thisBlock->getParentOp())) 1400 return boxValue; 1401 auto boxPtrTy = mlir::LLVM::LLVMPointerType::get(boxValue.getType()); 1402 auto alloca = genAllocaWithType(loc, boxPtrTy, defaultAlign, rewriter); 1403 rewriter.create<mlir::LLVM::StoreOp>(loc, boxValue, alloca); 1404 return alloca; 1405 } 1406 }; 1407 1408 /// Compute the extent of a triplet slice (lb:ub:step). 1409 static mlir::Value 1410 computeTripletExtent(mlir::ConversionPatternRewriter &rewriter, 1411 mlir::Location loc, mlir::Value lb, mlir::Value ub, 1412 mlir::Value step, mlir::Value zero, mlir::Type type) { 1413 mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb); 1414 extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step); 1415 extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step); 1416 // If the resulting extent is negative (`ub-lb` and `step` have different 1417 // signs), zero must be returned instead. 1418 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 1419 loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero); 1420 return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero); 1421 } 1422 1423 /// Create a generic box on a memory reference. This conversion lowers the 1424 /// abstract box to the appropriate, initialized descriptor. 1425 struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> { 1426 using EmboxCommonConversion::EmboxCommonConversion; 1427 1428 mlir::LogicalResult 1429 matchAndRewrite(fir::EmboxOp embox, OpAdaptor adaptor, 1430 mlir::ConversionPatternRewriter &rewriter) const override { 1431 assert(!embox.getShape() && "There should be no dims on this embox op"); 1432 auto [boxTy, dest, eleSize] = 1433 consDescriptorPrefix(embox, rewriter, /*rank=*/0, 1434 /*lenParams=*/adaptor.getOperands().drop_front(1)); 1435 dest = insertBaseAddress(rewriter, embox.getLoc(), dest, 1436 adaptor.getOperands()[0]); 1437 if (isDerivedTypeWithLenParams(boxTy)) { 1438 TODO(embox.getLoc(), 1439 "fir.embox codegen of derived with length parameters"); 1440 return mlir::failure(); 1441 } 1442 auto result = placeInMemoryIfNotGlobalInit(rewriter, embox.getLoc(), dest); 1443 rewriter.replaceOp(embox, result); 1444 return mlir::success(); 1445 } 1446 }; 1447 1448 /// Create a generic box on a memory reference.
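/// For illustration only (a rough sketch; the operand and type syntax below is
/// assumed, not taken from a test): an op along the lines of
///   %box = fircg.ext_embox %addr(%n) : (!fir.ref<!fir.array<?xi32>>, index)
///            -> !fir.box<!fir.array<?xi32>>
/// is rewritten into an llvm.mlir.undef descriptor filled by a chain of
/// llvm.insertvalue operations (base address, element size, CFI version, rank,
/// type code, attribute, f18Addendum, and one {lower bound, extent, stride}
/// triple per output dimension), which placeInMemoryIfNotGlobalInit then
/// stores to a stack slot unless the embox appears in a global initializer.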
1449 struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> { 1450 using EmboxCommonConversion::EmboxCommonConversion; 1451 1452 mlir::LogicalResult 1453 matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, 1454 mlir::ConversionPatternRewriter &rewriter) const override { 1455 auto [boxTy, dest, eleSize] = consDescriptorPrefix( 1456 xbox, rewriter, xbox.getOutRank(), 1457 adaptor.getOperands().drop_front(xbox.lenParamOffset())); 1458 // Generate the triples in the dims field of the descriptor 1459 mlir::ValueRange operands = adaptor.getOperands(); 1460 auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); 1461 mlir::Value base = operands[0]; 1462 assert(!xbox.shape().empty() && "must have a shape"); 1463 unsigned shapeOffset = xbox.shapeOffset(); 1464 bool hasShift = !xbox.shift().empty(); 1465 unsigned shiftOffset = xbox.shiftOffset(); 1466 bool hasSlice = !xbox.slice().empty(); 1467 unsigned sliceOffset = xbox.sliceOffset(); 1468 mlir::Location loc = xbox.getLoc(); 1469 mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0); 1470 mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1); 1471 mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize); 1472 mlir::Value prevPtrOff = one; 1473 mlir::Type eleTy = boxTy.getEleTy(); 1474 const unsigned rank = xbox.getRank(); 1475 llvm::SmallVector<mlir::Value> gepArgs; 1476 unsigned constRows = 0; 1477 mlir::Value ptrOffset = zero; 1478 if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) 1479 if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) { 1480 mlir::Type seqEleTy = seqTy.getEleTy(); 1481 // Adjust the element scaling factor if the element is a dependent type. 1482 if (fir::hasDynamicSize(seqEleTy)) { 1483 if (fir::isa_char(seqEleTy)) { 1484 assert(xbox.lenParams().size() == 1); 1485 prevPtrOff = integerCast(loc, rewriter, i64Ty, 1486 operands[xbox.lenParamOffset()]); 1487 } else if (seqEleTy.isa<fir::RecordType>()) { 1488 TODO(loc, "generate call to calculate size of PDT"); 1489 } else { 1490 return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); 1491 } 1492 } else { 1493 constRows = seqTy.getConstantRows(); 1494 } 1495 } 1496 1497 bool hasSubcomp = !xbox.subcomponent().empty(); 1498 if (!xbox.substr().empty()) 1499 TODO(loc, "codegen of fir.embox with substring"); 1500 1501 mlir::Value stepExpr; 1502 if (hasSubcomp) { 1503 // We have a subcomponent. The step value needs to be the number of 1504 // bytes per element (which is a derived type). 1505 mlir::Type ty0 = base.getType(); 1506 [[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 1507 assert(ptrTy && "expected pointer type"); 1508 mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); 1509 assert(memEleTy && "expected fir pointer type"); 1510 auto seqTy = memEleTy.dyn_cast<fir::SequenceType>(); 1511 assert(seqTy && "expected sequence type"); 1512 mlir::Type seqEleTy = seqTy.getEleTy(); 1513 auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); 1514 stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); 1515 } 1516 1517 // Process the array subspace arguments (shape, shift, etc.), if any, 1518 // translating everything to values in the descriptor wherever the entity 1519 // has a dynamic array dimension. 
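// Note on the loop below (added comment, no change in behavior): `di` walks
// the natural dimensions of the memory reference while `descIdx` walks the
// dimensions that survive in the resulting descriptor; a slice with a scalar
// subscript drops its dimension, so `descIdx` may lag behind `di`.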
1520 for (unsigned di = 0, descIdx = 0; di < rank; ++di) { 1521 mlir::Value extent = operands[shapeOffset]; 1522 mlir::Value outerExtent = extent; 1523 bool skipNext = false; 1524 if (hasSlice) { 1525 mlir::Value off = operands[sliceOffset]; 1526 mlir::Value adj = one; 1527 if (hasShift) 1528 adj = operands[shiftOffset]; 1529 auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj); 1530 if (constRows > 0) { 1531 gepArgs.push_back(ao); 1532 } else { 1533 auto dimOff = 1534 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff); 1535 ptrOffset = 1536 rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset); 1537 } 1538 if (mlir::isa_and_nonnull<fir::UndefOp>( 1539 xbox.slice()[3 * di + 1].getDefiningOp())) { 1540 // This dimension contains a scalar expression in the array slice op. 1541 // The dimension is loop invariant, will be dropped, and will not 1542 // appear in the descriptor. 1543 skipNext = true; 1544 } 1545 } 1546 if (!skipNext) { 1547 if (hasSlice) 1548 extent = computeTripletExtent(rewriter, loc, operands[sliceOffset], 1549 operands[sliceOffset + 1], 1550 operands[sliceOffset + 2], zero, i64Ty); 1551 // Store the lower bound (normally 0) for BIND(C) interoperability. 1552 mlir::Value lb = zero; 1553 const bool isaPointerOrAllocatable = 1554 eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>(); 1555 // The lower bound defaults to 1 for POINTER, ALLOCATABLE, and 1556 // denormalized descriptors. 1557 if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) { 1558 lb = one; 1559 // If there is a shifted origin, and no fir.slice, and this is not 1560 // a normalized descriptor then use the value from the shift op as 1561 // the lower bound. 1562 if (hasShift && !(hasSlice || hasSubcomp)) { 1563 lb = operands[shiftOffset]; 1564 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1565 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1566 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, 1567 lb); 1568 } 1569 } 1570 dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); 1571 1572 dest = insertExtent(rewriter, loc, dest, descIdx, extent); 1573 1574 // Store the step (scaled by the shaped extent). 1575 1576 mlir::Value step = hasSubcomp ? stepExpr : prevDim; 1577 if (hasSlice) 1578 step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step, 1579 operands[sliceOffset + 2]); 1580 dest = insertStride(rewriter, loc, dest, descIdx, step); 1581 ++descIdx; 1582 } 1583 1584 // compute the stride and offset for the next natural dimension 1585 prevDim = 1586 rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent); 1587 if (constRows == 0) 1588 prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff, 1589 outerExtent); 1590 else 1591 --constRows; 1592 1593 // increment iterators 1594 ++shapeOffset; 1595 if (hasShift) 1596 ++shiftOffset; 1597 if (hasSlice) 1598 sliceOffset += 3; 1599 } 1600 if (hasSlice || hasSubcomp || !xbox.substr().empty()) { 1601 llvm::SmallVector<mlir::Value> args = {ptrOffset}; 1602 args.append(gepArgs.rbegin(), gepArgs.rend()); 1603 if (hasSubcomp) { 1604 // For each field in the path add the offset to base via the args list. 1605 // In the most general case, some offsets must be computed since 1606 // they are not known until runtime.
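// (Added note: when the derived type has a compile-time constant layout, the
// zero-based field indices passed as subcomponent operands are appended to
// the GEP arguments below unchanged; the dynamically sized case is the TODO
// right after this comment.)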
1607 if (fir::hasDynamicSize(fir::unwrapSequenceType( 1608 fir::unwrapPassByRefType(xbox.memref().getType())))) 1609 TODO(loc, "fir.embox codegen dynamic size component in derived type"); 1610 args.append(operands.begin() + xbox.subcomponentOffset(), 1611 operands.begin() + xbox.subcomponentOffset() + 1612 xbox.subcomponent().size()); 1613 } 1614 base = 1615 rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), base, args); 1616 if (!xbox.substr().empty()) 1617 base = shiftSubstringBase(rewriter, loc, base, 1618 operands[xbox.substrOffset()]); 1619 } 1620 dest = insertBaseAddress(rewriter, loc, dest, base); 1621 if (isDerivedTypeWithLenParams(boxTy)) 1622 TODO(loc, "fir.embox codegen of derived with length parameters"); 1623 1624 mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest); 1625 rewriter.replaceOp(xbox, result); 1626 return mlir::success(); 1627 } 1628 1629 /// Return true if `xbox` has a normalized lower bounds attribute. A box value 1630 /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a 1631 /// zero origin lower bound for interoperability with BIND(C). 1632 inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { 1633 return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); 1634 } 1635 }; 1636 1637 /// Create a new box given a box reference. 1638 struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> { 1639 using EmboxCommonConversion::EmboxCommonConversion; 1640 1641 mlir::LogicalResult 1642 matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor, 1643 mlir::ConversionPatternRewriter &rewriter) const override { 1644 mlir::Location loc = rebox.getLoc(); 1645 mlir::Type idxTy = lowerTy().indexType(); 1646 mlir::Value loweredBox = adaptor.getOperands()[0]; 1647 mlir::ValueRange operands = adaptor.getOperands(); 1648 1649 // Create new descriptor and fill its non-shape related data. 
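// (Added note: for character entities, the element size re-read from the
// input box below is a byte count; for KIND != 1 it is divided by the
// character width so that the length parameter stored in `lenParams` is the
// LEN value expected when building the new descriptor.)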
1650 llvm::SmallVector<mlir::Value, 2> lenParams; 1651 mlir::Type inputEleTy = getInputEleTy(rebox); 1652 if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) { 1653 mlir::Value len = 1654 loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter); 1655 if (charTy.getFKind() != 1) { 1656 mlir::Value width = 1657 genConstantIndex(loc, idxTy, rewriter, charTy.getFKind()); 1658 len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width); 1659 } 1660 lenParams.emplace_back(len); 1661 } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) { 1662 if (recTy.getNumLenParams() != 0) 1663 TODO(loc, "reboxing descriptor of derived type with length parameters"); 1664 } 1665 auto [boxTy, dest, eleSize] = 1666 consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams); 1667 1668 // Read input extents, strides, and base address 1669 llvm::SmallVector<mlir::Value> inputExtents; 1670 llvm::SmallVector<mlir::Value> inputStrides; 1671 const unsigned inputRank = rebox.getRank(); 1672 for (unsigned i = 0; i < inputRank; ++i) { 1673 mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i); 1674 llvm::SmallVector<mlir::Value, 3> dimInfo = 1675 getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter); 1676 inputExtents.emplace_back(dimInfo[1]); 1677 inputStrides.emplace_back(dimInfo[2]); 1678 } 1679 1680 mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType()); 1681 mlir::Value baseAddr = 1682 loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter); 1683 1684 if (!rebox.slice().empty() || !rebox.subcomponent().empty()) 1685 return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1686 operands, rewriter); 1687 return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides, 1688 operands, rewriter); 1689 } 1690 1691 private: 1692 /// Write resulting shape and base address in descriptor, and replace rebox 1693 /// op. 1694 mlir::LogicalResult 1695 finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1696 mlir::ValueRange lbounds, mlir::ValueRange extents, 1697 mlir::ValueRange strides, 1698 mlir::ConversionPatternRewriter &rewriter) const { 1699 mlir::Location loc = rebox.getLoc(); 1700 mlir::Value zero = 1701 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 1702 mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1); 1703 for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) { 1704 mlir::Value extent = std::get<0>(iter.value()); 1705 unsigned dim = iter.index(); 1706 mlir::Value lb = one; 1707 if (!lbounds.empty()) { 1708 lb = lbounds[dim]; 1709 auto extentIsEmpty = rewriter.create<mlir::LLVM::ICmpOp>( 1710 loc, mlir::LLVM::ICmpPredicate::eq, extent, zero); 1711 lb = rewriter.create<mlir::LLVM::SelectOp>(loc, extentIsEmpty, one, lb); 1712 }; 1713 dest = insertLowerBound(rewriter, loc, dest, dim, lb); 1714 dest = insertExtent(rewriter, loc, dest, dim, extent); 1715 dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value())); 1716 } 1717 dest = insertBaseAddress(rewriter, loc, dest, base); 1718 mlir::Value result = 1719 placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest); 1720 rewriter.replaceOp(rebox, result); 1721 return mlir::success(); 1722 } 1723 1724 // Apply slice given the base address, extents and strides of the input box. 
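// For illustration only (a sketch of the effect of sliceBox below, assuming a
// one dimensional triplet slice array(lb:ub:step) with input byte stride s):
// the base address is advanced by (lb - origin) * s bytes, the new extent is
// max((ub - lb + step) / step, 0), and the new byte stride is step * s.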
1725 mlir::LogicalResult 1726 sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1727 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1728 mlir::ValueRange operands, 1729 mlir::ConversionPatternRewriter &rewriter) const { 1730 mlir::Location loc = rebox.getLoc(); 1731 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1732 mlir::Type idxTy = lowerTy().indexType(); 1733 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 1734 // Apply subcomponent and substring shift on base address. 1735 if (!rebox.subcomponent().empty() || !rebox.substr().empty()) { 1736 // Cast to inputEleTy* so that a GEP can be used. 1737 mlir::Type inputEleTy = getInputEleTy(rebox); 1738 auto llvmElePtrTy = 1739 mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy)); 1740 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base); 1741 1742 if (!rebox.subcomponent().empty()) { 1743 llvm::SmallVector<mlir::Value> gepOperands = {zero}; 1744 for (unsigned i = 0; i < rebox.subcomponent().size(); ++i) 1745 gepOperands.push_back(operands[rebox.subcomponentOffset() + i]); 1746 base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands); 1747 } 1748 if (!rebox.substr().empty()) 1749 base = shiftSubstringBase(rewriter, loc, base, 1750 operands[rebox.substrOffset()]); 1751 } 1752 1753 if (rebox.slice().empty()) 1754 // The array section is of the form array[%component][substring], keep 1755 // the input array extents and strides. 1756 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1757 inputExtents, inputStrides, rewriter); 1758 1759 // Strides from the fir.box are in bytes. 1760 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1761 1762 // The slice is of the form array(i:j:k)[%component]. Compute new extents 1763 // and strides. 1764 llvm::SmallVector<mlir::Value> slicedExtents; 1765 llvm::SmallVector<mlir::Value> slicedStrides; 1766 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 1767 const bool sliceHasOrigins = !rebox.shift().empty(); 1768 unsigned sliceOps = rebox.sliceOffset(); 1769 unsigned shiftOps = rebox.shiftOffset(); 1770 auto strideOps = inputStrides.begin(); 1771 const unsigned inputRank = inputStrides.size(); 1772 for (unsigned i = 0; i < inputRank; 1773 ++i, ++strideOps, ++shiftOps, sliceOps += 3) { 1774 mlir::Value sliceLb = 1775 integerCast(loc, rewriter, idxTy, operands[sliceOps]); 1776 mlir::Value inputStride = *strideOps; // already idxTy 1777 // Apply origin shift: base += (lb-shift)*input_stride 1778 mlir::Value sliceOrigin = 1779 sliceHasOrigins 1780 ? integerCast(loc, rewriter, idxTy, operands[shiftOps]) 1781 : one; 1782 mlir::Value diff = 1783 rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin); 1784 mlir::Value offset = 1785 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride); 1786 base = genGEP(loc, voidPtrTy, rewriter, base, offset); 1787 // Apply upper bound and step if this is a triplet. Otherwise, the 1788 // dimension is dropped and no extents/strides are computed. 
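// (Added worked example, for illustration: with input stride s, the triplet
// array(2:11:3) yields extent (11 - 2 + 3) / 3 = 4 and stride 3 * s.)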
1789 mlir::Value upper = operands[sliceOps + 1]; 1790 const bool isTripletSlice = 1791 !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp()); 1792 if (isTripletSlice) { 1793 mlir::Value step = 1794 integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]); 1795 // extent = ub-lb+step/step 1796 mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper); 1797 mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb, 1798 sliceUb, step, zero, idxTy); 1799 slicedExtents.emplace_back(extent); 1800 // stride = step*input_stride 1801 mlir::Value stride = 1802 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride); 1803 slicedStrides.emplace_back(stride); 1804 } 1805 } 1806 return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None, 1807 slicedExtents, slicedStrides, rewriter); 1808 } 1809 1810 /// Apply a new shape to the data described by a box given the base address, 1811 /// extents and strides of the box. 1812 mlir::LogicalResult 1813 reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base, 1814 mlir::ValueRange inputExtents, mlir::ValueRange inputStrides, 1815 mlir::ValueRange operands, 1816 mlir::ConversionPatternRewriter &rewriter) const { 1817 mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(), 1818 operands.begin() + rebox.shiftOffset() + 1819 rebox.shift().size()}; 1820 if (rebox.shape().empty()) { 1821 // Only setting new lower bounds. 1822 return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents, 1823 inputStrides, rewriter); 1824 } 1825 1826 mlir::Location loc = rebox.getLoc(); 1827 // Strides from the fir.box are in bytes. 1828 mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext()); 1829 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 1830 1831 llvm::SmallVector<mlir::Value> newStrides; 1832 llvm::SmallVector<mlir::Value> newExtents; 1833 mlir::Type idxTy = lowerTy().indexType(); 1834 // First stride from input box is kept. The rest is assumed contiguous 1835 // (it is not possible to reshape otherwise). If the input is scalar, 1836 // which may be OK if all new extents are ones, the stride does not 1837 // matter, use one. 1838 mlir::Value stride = inputStrides.empty() 1839 ? genConstantIndex(loc, idxTy, rewriter, 1) 1840 : inputStrides[0]; 1841 for (unsigned i = 0; i < rebox.shape().size(); ++i) { 1842 mlir::Value rawExtent = operands[rebox.shapeOffset() + i]; 1843 mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent); 1844 newExtents.emplace_back(extent); 1845 newStrides.emplace_back(stride); 1846 // nextStride = extent * stride; 1847 stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride); 1848 } 1849 return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides, 1850 rewriter); 1851 } 1852 1853 /// Return scalar element type of the input box. 1854 static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) { 1855 auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType()); 1856 if (auto seqTy = ty.dyn_cast<fir::SequenceType>()) 1857 return seqTy.getEleTy(); 1858 return ty; 1859 } 1860 }; 1861 1862 /// Lower `fir.emboxproc` operation. Creates a procedure box. 1863 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
1864 struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> { 1865 using FIROpConversion::FIROpConversion; 1866 1867 mlir::LogicalResult 1868 matchAndRewrite(fir::EmboxProcOp emboxproc, OpAdaptor adaptor, 1869 mlir::ConversionPatternRewriter &rewriter) const override { 1870 TODO(emboxproc.getLoc(), "fir.emboxproc codegen"); 1871 return mlir::failure(); 1872 } 1873 }; 1874 1875 // Code shared between insert_value and extract_value Ops. 1876 struct ValueOpCommon { 1877 // Translate the arguments pertaining to any multidimensional array to 1878 // row-major order for LLVM-IR. 1879 static void toRowMajor(llvm::SmallVectorImpl<mlir::Attribute> &attrs, 1880 mlir::Type ty) { 1881 assert(ty && "type is null"); 1882 const auto end = attrs.size(); 1883 for (std::remove_const_t<decltype(end)> i = 0; i < end; ++i) { 1884 if (auto seq = ty.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 1885 const auto dim = getDimension(seq); 1886 if (dim > 1) { 1887 auto ub = std::min(i + dim, end); 1888 std::reverse(attrs.begin() + i, attrs.begin() + ub); 1889 i += dim - 1; 1890 } 1891 ty = getArrayElementType(seq); 1892 } else if (auto st = ty.dyn_cast<mlir::LLVM::LLVMStructType>()) { 1893 ty = st.getBody()[attrs[i].cast<mlir::IntegerAttr>().getInt()]; 1894 } else { 1895 llvm_unreachable("index into invalid type"); 1896 } 1897 } 1898 } 1899 1900 static llvm::SmallVector<mlir::Attribute> 1901 collectIndices(mlir::ConversionPatternRewriter &rewriter, 1902 mlir::ArrayAttr arrAttr) { 1903 llvm::SmallVector<mlir::Attribute> attrs; 1904 for (auto i = arrAttr.begin(), e = arrAttr.end(); i != e; ++i) { 1905 if (i->isa<mlir::IntegerAttr>()) { 1906 attrs.push_back(*i); 1907 } else { 1908 auto fieldName = i->cast<mlir::StringAttr>().getValue(); 1909 ++i; 1910 auto ty = i->cast<mlir::TypeAttr>().getValue(); 1911 auto index = ty.cast<fir::RecordType>().getFieldIndex(fieldName); 1912 attrs.push_back(mlir::IntegerAttr::get(rewriter.getI32Type(), index)); 1913 } 1914 } 1915 return attrs; 1916 } 1917 1918 private: 1919 static unsigned getDimension(mlir::LLVM::LLVMArrayType ty) { 1920 unsigned result = 1; 1921 for (auto eleTy = ty.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>(); 1922 eleTy; 1923 eleTy = eleTy.getElementType().dyn_cast<mlir::LLVM::LLVMArrayType>()) 1924 ++result; 1925 return result; 1926 } 1927 1928 static mlir::Type getArrayElementType(mlir::LLVM::LLVMArrayType ty) { 1929 auto eleTy = ty.getElementType(); 1930 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 1931 eleTy = arrTy.getElementType(); 1932 return eleTy; 1933 } 1934 }; 1935 1936 namespace { 1937 /// Extract a subobject value from an ssa-value of aggregate type 1938 struct ExtractValueOpConversion 1939 : public FIROpAndTypeConversion<fir::ExtractValueOp>, 1940 public ValueOpCommon { 1941 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1942 1943 mlir::LogicalResult 1944 doRewrite(fir::ExtractValueOp extractVal, mlir::Type ty, OpAdaptor adaptor, 1945 mlir::ConversionPatternRewriter &rewriter) const override { 1946 auto attrs = collectIndices(rewriter, extractVal.getCoor()); 1947 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1948 auto position = mlir::ArrayAttr::get(extractVal.getContext(), attrs); 1949 rewriter.replaceOpWithNewOp<mlir::LLVM::ExtractValueOp>( 1950 extractVal, ty, adaptor.getOperands()[0], position); 1951 return mlir::success(); 1952 } 1953 }; 1954 1955 /// InsertValue is the generalized instruction for the composition of new 1956 /// aggregate type values. 
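/// For illustration only (the textual syntax is approximate and not checked
/// against the exact FIR and LLVM dialect printers): an op along the lines of
///   %r = fir.insert_value %agg, %val, [1 : index]
/// on a !fir.type<t{i:i32,x:f32}> value becomes roughly
///   %r = llvm.insertvalue %val, %agg[1] : !llvm.struct<(i32, f32)>
/// with multidimensional array coordinates reversed into row-major order by
/// toRowMajor above.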
1957 struct InsertValueOpConversion 1958 : public FIROpAndTypeConversion<fir::InsertValueOp>, 1959 public ValueOpCommon { 1960 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1961 1962 mlir::LogicalResult 1963 doRewrite(fir::InsertValueOp insertVal, mlir::Type ty, OpAdaptor adaptor, 1964 mlir::ConversionPatternRewriter &rewriter) const override { 1965 auto attrs = collectIndices(rewriter, insertVal.getCoor()); 1966 toRowMajor(attrs, adaptor.getOperands()[0].getType()); 1967 auto position = mlir::ArrayAttr::get(insertVal.getContext(), attrs); 1968 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 1969 insertVal, ty, adaptor.getOperands()[0], adaptor.getOperands()[1], 1970 position); 1971 return mlir::success(); 1972 } 1973 }; 1974 1975 /// InsertOnRange inserts a value into a sequence over a range of offsets. 1976 struct InsertOnRangeOpConversion 1977 : public FIROpAndTypeConversion<fir::InsertOnRangeOp> { 1978 using FIROpAndTypeConversion::FIROpAndTypeConversion; 1979 1980 // Increments an array of subscripts in a row major fasion. 1981 void incrementSubscripts(const llvm::SmallVector<uint64_t> &dims, 1982 llvm::SmallVector<uint64_t> &subscripts) const { 1983 for (size_t i = dims.size(); i > 0; --i) { 1984 if (++subscripts[i - 1] < dims[i - 1]) { 1985 return; 1986 } 1987 subscripts[i - 1] = 0; 1988 } 1989 } 1990 1991 mlir::LogicalResult 1992 doRewrite(fir::InsertOnRangeOp range, mlir::Type ty, OpAdaptor adaptor, 1993 mlir::ConversionPatternRewriter &rewriter) const override { 1994 1995 llvm::SmallVector<uint64_t> dims; 1996 auto type = adaptor.getOperands()[0].getType(); 1997 1998 // Iteratively extract the array dimensions from the type. 1999 while (auto t = type.dyn_cast<mlir::LLVM::LLVMArrayType>()) { 2000 dims.push_back(t.getNumElements()); 2001 type = t.getElementType(); 2002 } 2003 2004 llvm::SmallVector<std::uint64_t> lBounds; 2005 llvm::SmallVector<std::uint64_t> uBounds; 2006 2007 // Unzip the upper and lower bound and convert to a row major format. 2008 mlir::DenseIntElementsAttr coor = range.getCoor(); 2009 auto reversedCoor = llvm::reverse(coor.getValues<int64_t>()); 2010 for (auto i = reversedCoor.begin(), e = reversedCoor.end(); i != e; ++i) { 2011 uBounds.push_back(*i++); 2012 lBounds.push_back(*i); 2013 } 2014 2015 auto &subscripts = lBounds; 2016 auto loc = range.getLoc(); 2017 mlir::Value lastOp = adaptor.getOperands()[0]; 2018 mlir::Value insertVal = adaptor.getOperands()[1]; 2019 2020 auto i64Ty = rewriter.getI64Type(); 2021 while (subscripts != uBounds) { 2022 // Convert uint64_t's to Attribute's. 2023 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2024 for (const auto &subscript : subscripts) 2025 subscriptAttrs.push_back(mlir::IntegerAttr::get(i64Ty, subscript)); 2026 lastOp = rewriter.create<mlir::LLVM::InsertValueOp>( 2027 loc, ty, lastOp, insertVal, 2028 mlir::ArrayAttr::get(range.getContext(), subscriptAttrs)); 2029 2030 incrementSubscripts(dims, subscripts); 2031 } 2032 2033 // Convert uint64_t's to Attribute's. 
2034 llvm::SmallVector<mlir::Attribute> subscriptAttrs; 2035 for (const auto &subscript : subscripts) 2036 subscriptAttrs.push_back( 2037 mlir::IntegerAttr::get(rewriter.getI64Type(), subscript)); 2038 mlir::ArrayRef<mlir::Attribute> arrayRef(subscriptAttrs); 2039 2040 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 2041 range, ty, lastOp, insertVal, 2042 mlir::ArrayAttr::get(range.getContext(), arrayRef)); 2043 2044 return mlir::success(); 2045 } 2046 }; 2047 } // namespace 2048 2049 namespace { 2050 /// XArrayCoor is the address arithmetic on a dynamically shaped, sliced, 2051 /// shifted etc. array. 2052 /// (See the static restriction on coordinate_of.) array_coor determines the 2053 /// coordinate (location) of a specific element. 2054 struct XArrayCoorOpConversion 2055 : public FIROpAndTypeConversion<fir::cg::XArrayCoorOp> { 2056 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2057 2058 mlir::LogicalResult 2059 doRewrite(fir::cg::XArrayCoorOp coor, mlir::Type ty, OpAdaptor adaptor, 2060 mlir::ConversionPatternRewriter &rewriter) const override { 2061 auto loc = coor.getLoc(); 2062 mlir::ValueRange operands = adaptor.getOperands(); 2063 unsigned rank = coor.getRank(); 2064 assert(coor.indices().size() == rank); 2065 assert(coor.shape().empty() || coor.shape().size() == rank); 2066 assert(coor.shift().empty() || coor.shift().size() == rank); 2067 assert(coor.slice().empty() || coor.slice().size() == 3 * rank); 2068 mlir::Type idxTy = lowerTy().indexType(); 2069 mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1); 2070 mlir::Value prevExt = one; 2071 mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0); 2072 mlir::Value offset = zero; 2073 const bool isShifted = !coor.shift().empty(); 2074 const bool isSliced = !coor.slice().empty(); 2075 const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>(); 2076 2077 // For each dimension of the array, generate the offset calculation. 2078 for (unsigned i = 0; i < rank; ++i) { 2079 mlir::Value index = 2080 integerCast(loc, rewriter, idxTy, operands[coor.indicesOffset() + i]); 2081 mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy, 2082 operands[coor.shiftOffset() + i]) 2083 : one; 2084 mlir::Value step = one; 2085 bool normalSlice = isSliced; 2086 // Compute zero based index in dimension i of the element, applying 2087 // potential triplets and lower bounds. 2088 if (isSliced) { 2089 mlir::Value ub = operands[coor.sliceOffset() + i + 1]; 2090 normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp()); 2091 if (normalSlice) 2092 step = integerCast(loc, rewriter, idxTy, 2093 operands[coor.sliceOffset() + i + 2]); 2094 } 2095 auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb); 2096 mlir::Value diff = 2097 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, idx, step); 2098 if (normalSlice) { 2099 mlir::Value sliceLb = 2100 integerCast(loc, rewriter, idxTy, operands[coor.sliceOffset() + i]); 2101 auto adj = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, lb); 2102 diff = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, diff, adj); 2103 } 2104 // Update the offset given the stride and the zero based index `diff` 2105 // that was just computed. 2106 if (baseIsBoxed) { 2107 // Use stride in bytes from the descriptor. 
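// (Added note: in this boxed branch `offset` accumulates in bytes, which is
// why the address computation further down is done on an i8* before casting
// back to the element pointer type; in the unboxed branch below it
// accumulates in elements.)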
2108 mlir::Value stride = 2109 loadStrideFromBox(loc, adaptor.getOperands()[0], i, rewriter); 2110 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, stride); 2111 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2112 } else { 2113 // Use stride computed at last iteration. 2114 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, prevExt); 2115 offset = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, offset); 2116 // Compute next stride assuming contiguity of the base array 2117 // (in element number). 2118 auto nextExt = 2119 integerCast(loc, rewriter, idxTy, operands[coor.shapeOffset() + i]); 2120 prevExt = 2121 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, prevExt, nextExt); 2122 } 2123 } 2124 2125 // Add computed offset to the base address. 2126 if (baseIsBoxed) { 2127 // Working with byte offsets. The base address is read from the fir.box. 2128 // and need to be casted to i8* to do the pointer arithmetic. 2129 mlir::Type baseTy = 2130 getBaseAddrTypeFromBox(adaptor.getOperands()[0].getType()); 2131 mlir::Value base = 2132 loadBaseAddrFromBox(loc, baseTy, adaptor.getOperands()[0], rewriter); 2133 mlir::Type voidPtrTy = getVoidPtrType(); 2134 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base); 2135 llvm::SmallVector<mlir::Value> args{offset}; 2136 auto addr = 2137 rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, base, args); 2138 if (coor.subcomponent().empty()) { 2139 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, baseTy, addr); 2140 return mlir::success(); 2141 } 2142 auto casted = rewriter.create<mlir::LLVM::BitcastOp>(loc, baseTy, addr); 2143 args.clear(); 2144 args.push_back(zero); 2145 if (!coor.lenParams().empty()) { 2146 // If type parameters are present, then we don't want to use a GEPOp 2147 // as below, as the LLVM struct type cannot be statically defined. 2148 TODO(loc, "derived type with type parameters"); 2149 } 2150 // TODO: array offset subcomponents must be converted to LLVM's 2151 // row-major layout here. 2152 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2153 args.push_back(operands[i]); 2154 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, baseTy, casted, 2155 args); 2156 return mlir::success(); 2157 } 2158 2159 // The array was not boxed, so it must be contiguous. offset is therefore an 2160 // element offset and the base type is kept in the GEP unless the element 2161 // type size is itself dynamic. 2162 mlir::Value base; 2163 if (coor.subcomponent().empty()) { 2164 // No subcomponent. 2165 if (!coor.lenParams().empty()) { 2166 // Type parameters. Adjust element size explicitly. 2167 auto eleTy = fir::dyn_cast_ptrEleTy(coor.getType()); 2168 assert(eleTy && "result must be a reference-like type"); 2169 if (fir::characterWithDynamicLen(eleTy)) { 2170 assert(coor.lenParams().size() == 1); 2171 auto bitsInChar = lowerTy().getKindMap().getCharacterBitsize( 2172 eleTy.cast<fir::CharacterType>().getFKind()); 2173 auto scaling = genConstantIndex(loc, idxTy, rewriter, bitsInChar / 8); 2174 auto scaledBySize = 2175 rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, offset, scaling); 2176 auto length = 2177 integerCast(loc, rewriter, idxTy, 2178 adaptor.getOperands()[coor.lenParamsOffset()]); 2179 offset = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, scaledBySize, 2180 length); 2181 } else { 2182 TODO(loc, "compute size of derived type with type parameters"); 2183 } 2184 } 2185 // Cast the base address to a pointer to T. 
2186 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, ty, 2187 adaptor.getOperands()[0]); 2188 } else { 2189 // Operand #0 must have a pointer type. For subcomponent slicing, we 2190 // want to cast away the array type and have a plain struct type. 2191 mlir::Type ty0 = adaptor.getOperands()[0].getType(); 2192 auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>(); 2193 assert(ptrTy && "expected pointer type"); 2194 mlir::Type eleTy = ptrTy.getElementType(); 2195 while (auto arrTy = eleTy.dyn_cast<mlir::LLVM::LLVMArrayType>()) 2196 eleTy = arrTy.getElementType(); 2197 auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy); 2198 base = rewriter.create<mlir::LLVM::BitcastOp>(loc, newTy, 2199 adaptor.getOperands()[0]); 2200 } 2201 llvm::SmallVector<mlir::Value> args = {offset}; 2202 for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i) 2203 args.push_back(operands[i]); 2204 rewriter.replaceOpWithNewOp<mlir::LLVM::GEPOp>(coor, ty, base, args); 2205 return mlir::success(); 2206 } 2207 }; 2208 } // namespace 2209 2210 /// Convert to (memory) reference to a reference to a subobject. 2211 /// The coordinate_of op is a Swiss army knife operation that can be used on 2212 /// (memory) references to records, arrays, complex, etc. as well as boxes. 2213 /// With unboxed arrays, there is the restriction that the array have a static 2214 /// shape in all but the last column. 2215 struct CoordinateOpConversion 2216 : public FIROpAndTypeConversion<fir::CoordinateOp> { 2217 using FIROpAndTypeConversion::FIROpAndTypeConversion; 2218 2219 mlir::LogicalResult 2220 doRewrite(fir::CoordinateOp coor, mlir::Type ty, OpAdaptor adaptor, 2221 mlir::ConversionPatternRewriter &rewriter) const override { 2222 mlir::ValueRange operands = adaptor.getOperands(); 2223 2224 mlir::Location loc = coor.getLoc(); 2225 mlir::Value base = operands[0]; 2226 mlir::Type baseObjectTy = coor.getBaseType(); 2227 mlir::Type objectTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2228 assert(objectTy && "fir.coordinate_of expects a reference type"); 2229 2230 // Complex type - basically, extract the real or imaginary part 2231 if (fir::isa_complex(objectTy)) { 2232 mlir::LLVM::ConstantOp c0 = 2233 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2234 llvm::SmallVector<mlir::Value> offs = {c0, operands[1]}; 2235 mlir::Value gep = genGEP(loc, ty, rewriter, base, offs); 2236 rewriter.replaceOp(coor, gep); 2237 return mlir::success(); 2238 } 2239 2240 // Boxed type - get the base pointer from the box 2241 if (baseObjectTy.dyn_cast<fir::BoxType>()) 2242 return doRewriteBox(coor, ty, operands, loc, rewriter); 2243 2244 // Reference, pointer or a heap type 2245 if (baseObjectTy.isa<fir::ReferenceType, fir::PointerType, fir::HeapType>()) 2246 return doRewriteRefOrPtr(coor, ty, operands, loc, rewriter); 2247 2248 return rewriter.notifyMatchFailure( 2249 coor, "fir.coordinate_of base operand has unsupported type"); 2250 } 2251 2252 static unsigned getFieldNumber(fir::RecordType ty, mlir::Value op) { 2253 return fir::hasDynamicSize(ty) 2254 ? 
op.getDefiningOp() 2255 ->getAttrOfType<mlir::IntegerAttr>("field") 2256 .getInt() 2257 : getIntValue(op); 2258 } 2259 2260 static int64_t getIntValue(mlir::Value val) { 2261 assert(val && val.dyn_cast<mlir::OpResult>() && "must not be null value"); 2262 mlir::Operation *defop = val.getDefiningOp(); 2263 2264 if (auto constOp = mlir::dyn_cast<mlir::arith::ConstantIntOp>(defop)) 2265 return constOp.value(); 2266 if (auto llConstOp = mlir::dyn_cast<mlir::LLVM::ConstantOp>(defop)) 2267 if (auto attr = llConstOp.getValue().dyn_cast<mlir::IntegerAttr>()) 2268 return attr.getValue().getSExtValue(); 2269 fir::emitFatalError(val.getLoc(), "must be a constant"); 2270 } 2271 2272 static bool hasSubDimensions(mlir::Type type) { 2273 return type.isa<fir::SequenceType, fir::RecordType, mlir::TupleType>(); 2274 } 2275 2276 /// Check whether this form of `!fir.coordinate_of` is supported. These 2277 /// additional checks are required, because we are not yet able to convert 2278 /// all valid forms of `!fir.coordinate_of`. 2279 /// TODO: Either implement the unsupported cases or extend the verifier 2280 /// in FIROps.cpp instead. 2281 static bool supportedCoordinate(mlir::Type type, mlir::ValueRange coors) { 2282 const std::size_t numOfCoors = coors.size(); 2283 std::size_t i = 0; 2284 bool subEle = false; 2285 bool ptrEle = false; 2286 for (; i < numOfCoors; ++i) { 2287 mlir::Value nxtOpnd = coors[i]; 2288 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2289 subEle = true; 2290 i += arrTy.getDimension() - 1; 2291 type = arrTy.getEleTy(); 2292 } else if (auto recTy = type.dyn_cast<fir::RecordType>()) { 2293 subEle = true; 2294 type = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2295 } else if (auto tupTy = type.dyn_cast<mlir::TupleType>()) { 2296 subEle = true; 2297 type = tupTy.getType(getIntValue(nxtOpnd)); 2298 } else { 2299 ptrEle = true; 2300 } 2301 } 2302 if (ptrEle) 2303 return (!subEle) && (numOfCoors == 1); 2304 return subEle && (i >= numOfCoors); 2305 } 2306 2307 /// Walk the abstract memory layout and determine if the path traverses any 2308 /// array types with unknown shape. Return true iff all the array types have a 2309 /// constant shape along the path. 2310 static bool arraysHaveKnownShape(mlir::Type type, mlir::ValueRange coors) { 2311 for (std::size_t i = 0, sz = coors.size(); i < sz; ++i) { 2312 mlir::Value nxtOpnd = coors[i]; 2313 if (auto arrTy = type.dyn_cast<fir::SequenceType>()) { 2314 if (fir::sequenceWithNonConstantShape(arrTy)) 2315 return false; 2316 i += arrTy.getDimension() - 1; 2317 type = arrTy.getEleTy(); 2318 } else if (auto strTy = type.dyn_cast<fir::RecordType>()) { 2319 type = strTy.getType(getFieldNumber(strTy, nxtOpnd)); 2320 } else if (auto strTy = type.dyn_cast<mlir::TupleType>()) { 2321 type = strTy.getType(getIntValue(nxtOpnd)); 2322 } else { 2323 return true; 2324 } 2325 } 2326 return true; 2327 } 2328 2329 private: 2330 mlir::LogicalResult 2331 doRewriteBox(fir::CoordinateOp coor, mlir::Type ty, mlir::ValueRange operands, 2332 mlir::Location loc, 2333 mlir::ConversionPatternRewriter &rewriter) const { 2334 mlir::Type boxObjTy = coor.getBaseType(); 2335 assert(boxObjTy.dyn_cast<fir::BoxType>() && "This is not a `fir.box`"); 2336 2337 mlir::Value boxBaseAddr = operands[0]; 2338 2339 // 1. SPECIAL CASE (uses `fir.len_param_index`): 2340 // %box = ... 
: !fir.box<!fir.type<derived{len1:i32}>> 2341 // %lenp = fir.len_param_index len1, !fir.type<derived{len1:i32}> 2342 // %addr = coordinate_of %box, %lenp 2343 if (coor.getNumOperands() == 2) { 2344 mlir::Operation *coordinateDef = 2345 (*coor.getCoor().begin()).getDefiningOp(); 2346 if (mlir::isa_and_nonnull<fir::LenParamIndexOp>(coordinateDef)) 2347 TODO(loc, 2348 "fir.coordinate_of - fir.len_param_index is not supported yet"); 2349 } 2350 2351 // 2. GENERAL CASE: 2352 // 2.1. (`fir.array`) 2353 // %box = ... : !fir.box<!fir.array<?xU>> 2354 // %idx = ... : index 2355 // %resultAddr = coordinate_of %box, %idx : !fir.ref<U> 2356 // 2.2 (`fir.derived`) 2357 // %box = ... : !fir.box<!fir.type<derived_type{field_1:i32}>> 2358 // %idx = ... : i32 2359 // %resultAddr = coordinate_of %box, %idx : !fir.ref<i32> 2360 // 2.3 (`fir.derived` inside `fir.array`) 2361 // %box = ... : !fir.box<!fir.array<10 x !fir.type<derived_1{field_1:f32, 2362 // field_2:f32}>>> %idx1 = ... : index %idx2 = ... : i32 %resultAddr = 2363 // coordinate_of %box, %idx1, %idx2 : !fir.ref<f32> 2364 // 2.4. TODO: Either document or disable any other case that the following 2365 // implementation might convert. 2366 mlir::LLVM::ConstantOp c0 = 2367 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2368 mlir::Value resultAddr = 2369 loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), 2370 boxBaseAddr, rewriter); 2371 // Component Type 2372 auto cpnTy = fir::dyn_cast_ptrOrBoxEleTy(boxObjTy); 2373 mlir::Type voidPtrTy = ::getVoidPtrType(coor.getContext()); 2374 2375 for (unsigned i = 1, last = operands.size(); i < last; ++i) { 2376 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2377 if (i != 1) 2378 TODO(loc, "fir.array nested inside other array and/or derived type"); 2379 // Applies byte strides from the box. Ignore lower bound from box 2380 // since fir.coordinate_of indexes are zero based. Lowering takes care 2381 // of lower bound aspects. This both accounts for dynamically sized 2382 // types and non-contiguous arrays.
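// (Added sketch, for illustration: the loop below computes
//   off = sum over the array dimensions of (index_d * byte_stride_d)
// and applies it with a GEP on an i8* view of the current address.)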
2383 auto idxTy = lowerTy().indexType(); 2384 mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0); 2385 for (unsigned index = i, lastIndex = i + arrTy.getDimension(); 2386 index < lastIndex; ++index) { 2387 mlir::Value stride = 2388 loadStrideFromBox(loc, operands[0], index - i, rewriter); 2389 auto sc = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, 2390 operands[index], stride); 2391 off = rewriter.create<mlir::LLVM::AddOp>(loc, idxTy, sc, off); 2392 } 2393 auto voidPtrBase = 2394 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, resultAddr); 2395 llvm::SmallVector<mlir::Value> args = {off}; 2396 resultAddr = rewriter.create<mlir::LLVM::GEPOp>(loc, voidPtrTy, 2397 voidPtrBase, args); 2398 i += arrTy.getDimension() - 1; 2399 cpnTy = arrTy.getEleTy(); 2400 } else if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) { 2401 auto recRefTy = 2402 mlir::LLVM::LLVMPointerType::get(lowerTy().convertType(recTy)); 2403 mlir::Value nxtOpnd = operands[i]; 2404 auto memObj = 2405 rewriter.create<mlir::LLVM::BitcastOp>(loc, recRefTy, resultAddr); 2406 llvm::SmallVector<mlir::Value> args = {c0, nxtOpnd}; 2407 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2408 auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); 2409 auto gep = rewriter.create<mlir::LLVM::GEPOp>( 2410 loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, 2411 args); 2412 resultAddr = 2413 rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, gep); 2414 } else { 2415 fir::emitFatalError(loc, "unexpected type in coordinate_of"); 2416 } 2417 } 2418 2419 rewriter.replaceOpWithNewOp<mlir::LLVM::BitcastOp>(coor, ty, resultAddr); 2420 return mlir::success(); 2421 } 2422 2423 mlir::LogicalResult 2424 doRewriteRefOrPtr(fir::CoordinateOp coor, mlir::Type ty, 2425 mlir::ValueRange operands, mlir::Location loc, 2426 mlir::ConversionPatternRewriter &rewriter) const { 2427 mlir::Type baseObjectTy = coor.getBaseType(); 2428 2429 // Component Type 2430 mlir::Type cpnTy = fir::dyn_cast_ptrOrBoxEleTy(baseObjectTy); 2431 bool hasSubdimension = hasSubDimensions(cpnTy); 2432 bool columnIsDeferred = !hasSubdimension; 2433 2434 if (!supportedCoordinate(cpnTy, operands.drop_front(1))) 2435 TODO(loc, "unsupported combination of coordinate operands"); 2436 2437 const bool hasKnownShape = 2438 arraysHaveKnownShape(cpnTy, operands.drop_front(1)); 2439 2440 // If only the column is `?`, then we can simply place the column value in 2441 // the 0-th GEP position. 
2442 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2443 if (!hasKnownShape) { 2444 const unsigned sz = arrTy.getDimension(); 2445 if (arraysHaveKnownShape(arrTy.getEleTy(), 2446 operands.drop_front(1 + sz))) { 2447 fir::SequenceType::ShapeRef shape = arrTy.getShape(); 2448 bool allConst = true; 2449 for (unsigned i = 0; i < sz - 1; ++i) { 2450 if (shape[i] < 0) { 2451 allConst = false; 2452 break; 2453 } 2454 } 2455 if (allConst) 2456 columnIsDeferred = true; 2457 } 2458 } 2459 } 2460 2461 if (fir::hasDynamicSize(fir::unwrapSequenceType(cpnTy))) 2462 return mlir::emitError( 2463 loc, "fir.coordinate_of with a dynamic element size is unsupported"); 2464 2465 if (hasKnownShape || columnIsDeferred) { 2466 llvm::SmallVector<mlir::Value> offs; 2467 if (hasKnownShape && hasSubdimension) { 2468 mlir::LLVM::ConstantOp c0 = 2469 genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); 2470 offs.push_back(c0); 2471 } 2472 llvm::Optional<int> dims; 2473 llvm::SmallVector<mlir::Value> arrIdx; 2474 for (std::size_t i = 1, sz = operands.size(); i < sz; ++i) { 2475 mlir::Value nxtOpnd = operands[i]; 2476 2477 if (!cpnTy) 2478 return mlir::emitError(loc, "invalid coordinate/check failed"); 2479 2480 // check if the i-th coordinate relates to an array 2481 if (dims.hasValue()) { 2482 arrIdx.push_back(nxtOpnd); 2483 int dimsLeft = *dims; 2484 if (dimsLeft > 1) { 2485 dims = dimsLeft - 1; 2486 continue; 2487 } 2488 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2489 // append array range in reverse (FIR arrays are column-major) 2490 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2491 arrIdx.clear(); 2492 dims.reset(); 2493 continue; 2494 } 2495 if (auto arrTy = cpnTy.dyn_cast<fir::SequenceType>()) { 2496 int d = arrTy.getDimension() - 1; 2497 if (d > 0) { 2498 dims = d; 2499 arrIdx.push_back(nxtOpnd); 2500 continue; 2501 } 2502 cpnTy = cpnTy.cast<fir::SequenceType>().getEleTy(); 2503 offs.push_back(nxtOpnd); 2504 continue; 2505 } 2506 2507 // check if the i-th coordinate relates to a field 2508 if (auto recTy = cpnTy.dyn_cast<fir::RecordType>()) 2509 cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); 2510 else if (auto tupTy = cpnTy.dyn_cast<mlir::TupleType>()) 2511 cpnTy = tupTy.getType(getIntValue(nxtOpnd)); 2512 else 2513 cpnTy = nullptr; 2514 2515 offs.push_back(nxtOpnd); 2516 } 2517 if (dims.hasValue()) 2518 offs.append(arrIdx.rbegin(), arrIdx.rend()); 2519 mlir::Value base = operands[0]; 2520 mlir::Value retval = genGEP(loc, ty, rewriter, base, offs); 2521 rewriter.replaceOp(coor, retval); 2522 return mlir::success(); 2523 } 2524 2525 return mlir::emitError( 2526 loc, "fir.coordinate_of base operand has unsupported type"); 2527 } 2528 }; 2529 2530 /// Convert `fir.field_index`. The conversion depends on whether the size of 2531 /// the record is static or dynamic. 2532 struct FieldIndexOpConversion : public FIROpConversion<fir::FieldIndexOp> { 2533 using FIROpConversion::FIROpConversion; 2534 2535 // NB: most field references should be resolved by this point 2536 mlir::LogicalResult 2537 matchAndRewrite(fir::FieldIndexOp field, OpAdaptor adaptor, 2538 mlir::ConversionPatternRewriter &rewriter) const override { 2539 auto recTy = field.getOnType().cast<fir::RecordType>(); 2540 unsigned index = recTy.getFieldIndex(field.getFieldId()); 2541 2542 if (!fir::hasDynamicSize(recTy)) { 2543 // Derived type has compile-time constant layout. Return index of the 2544 // component type in the parent type (to be used in GEP). 
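// (Added example, for illustration only, with an invented type name: given
// !fir.type<t{i:i32,x:f32}>, a `fir.field_index x` on that type takes this
// branch and is rewritten to the constant 1, the position of `x` within the
// struct, ready to be used as a GEP index.)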
2545 rewriter.replaceOp(field, mlir::ValueRange{genConstantOffset( 2546 field.getLoc(), rewriter, index)}); 2547 return mlir::success(); 2548 } 2549 2550 // Derived type does not have a compile-time constant layout. Call the compiler 2551 // generated function to determine the byte offset of the field at runtime. 2552 // This returns a non-constant value. 2553 mlir::FlatSymbolRefAttr symAttr = mlir::SymbolRefAttr::get( 2554 field.getContext(), getOffsetMethodName(recTy, field.getFieldId())); 2555 mlir::NamedAttribute callAttr = rewriter.getNamedAttr("callee", symAttr); 2556 mlir::NamedAttribute fieldAttr = rewriter.getNamedAttr( 2557 "field", mlir::IntegerAttr::get(lowerTy().indexType(), index)); 2558 rewriter.replaceOpWithNewOp<mlir::LLVM::CallOp>( 2559 field, lowerTy().offsetType(), adaptor.getOperands(), 2560 llvm::ArrayRef<mlir::NamedAttribute>{callAttr, fieldAttr}); 2561 return mlir::success(); 2562 } 2563 2564 // Reconstruct the name of the compiler generated method that calculates the 2565 // offset. 2566 inline static std::string getOffsetMethodName(fir::RecordType recTy, 2567 llvm::StringRef field) { 2568 return recTy.getName().str() + "P." + field.str() + ".offset"; 2569 } 2570 }; 2571 2572 /// Convert `fir.end` 2573 struct FirEndOpConversion : public FIROpConversion<fir::FirEndOp> { 2574 using FIROpConversion::FIROpConversion; 2575 2576 mlir::LogicalResult 2577 matchAndRewrite(fir::FirEndOp firEnd, OpAdaptor, 2578 mlir::ConversionPatternRewriter &rewriter) const override { 2579 TODO(firEnd.getLoc(), "fir.end codegen"); 2580 return mlir::failure(); 2581 } 2582 }; 2583 2584 /// Lower `fir.gentypedesc` to a global constant. 2585 struct GenTypeDescOpConversion : public FIROpConversion<fir::GenTypeDescOp> { 2586 using FIROpConversion::FIROpConversion; 2587 2588 mlir::LogicalResult 2589 matchAndRewrite(fir::GenTypeDescOp gentypedesc, OpAdaptor adaptor, 2590 mlir::ConversionPatternRewriter &rewriter) const override { 2591 TODO(gentypedesc.getLoc(), "fir.gentypedesc codegen"); 2592 return mlir::failure(); 2593 } 2594 }; 2595 2596 /// Lower `fir.has_value` operation to `llvm.return` operation. 2597 struct HasValueOpConversion : public FIROpConversion<fir::HasValueOp> { 2598 using FIROpConversion::FIROpConversion; 2599 2600 mlir::LogicalResult 2601 matchAndRewrite(fir::HasValueOp op, OpAdaptor adaptor, 2602 mlir::ConversionPatternRewriter &rewriter) const override { 2603 rewriter.replaceOpWithNewOp<mlir::LLVM::ReturnOp>(op, 2604 adaptor.getOperands()); 2605 return mlir::success(); 2606 } 2607 }; 2608 2609 /// Lower `fir.global` operation to `llvm.global` operation. 2610 /// `fir.insert_on_range` operations are replaced with a constant dense attribute 2611 /// if they are applied on the full range.
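/// For illustration only (a sketch of the shape of the rewrite, not exact
/// syntax): a fir.global whose initializer region builds its value with a
/// single fir.insert_on_range covering every element becomes an
/// llvm.mlir.global of the converted type; the full-range insert_on_range is
/// folded into a dense splat constant (e.g. dense<7> for an array filled with
/// the value 7), and the terminating fir.has_value becomes llvm.return via
/// HasValueOpConversion above.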
2612 struct GlobalOpConversion : public FIROpConversion<fir::GlobalOp> { 2613 using FIROpConversion::FIROpConversion; 2614 2615 mlir::LogicalResult 2616 matchAndRewrite(fir::GlobalOp global, OpAdaptor adaptor, 2617 mlir::ConversionPatternRewriter &rewriter) const override { 2618 auto tyAttr = convertType(global.getType()); 2619 if (global.getType().isa<fir::BoxType>()) 2620 tyAttr = tyAttr.cast<mlir::LLVM::LLVMPointerType>().getElementType(); 2621 auto loc = global.getLoc(); 2622 mlir::Attribute initAttr; 2623 if (global.getInitVal()) 2624 initAttr = global.getInitVal().getValue(); 2625 auto linkage = convertLinkage(global.getLinkName()); 2626 auto isConst = global.getConstant().hasValue(); 2627 auto g = rewriter.create<mlir::LLVM::GlobalOp>( 2628 loc, tyAttr, isConst, linkage, global.getSymName(), initAttr); 2629 auto &gr = g.getInitializerRegion(); 2630 rewriter.inlineRegionBefore(global.getRegion(), gr, gr.end()); 2631 if (!gr.empty()) { 2632 // Replace insert_on_range with a constant dense attribute if the 2633 // initialization is on the full range. 2634 auto insertOnRangeOps = gr.front().getOps<fir::InsertOnRangeOp>(); 2635 for (auto insertOp : insertOnRangeOps) { 2636 if (isFullRange(insertOp.getCoor(), insertOp.getType())) { 2637 auto seqTyAttr = convertType(insertOp.getType()); 2638 auto *op = insertOp.getVal().getDefiningOp(); 2639 auto constant = mlir::dyn_cast<mlir::arith::ConstantOp>(op); 2640 if (!constant) { 2641 auto convertOp = mlir::dyn_cast<fir::ConvertOp>(op); 2642 if (!convertOp) 2643 continue; 2644 constant = mlir::cast<mlir::arith::ConstantOp>( 2645 convertOp.getValue().getDefiningOp()); 2646 } 2647 mlir::Type vecType = mlir::VectorType::get( 2648 insertOp.getType().getShape(), constant.getType()); 2649 auto denseAttr = mlir::DenseElementsAttr::get( 2650 vecType.cast<mlir::ShapedType>(), constant.getValue()); 2651 rewriter.setInsertionPointAfter(insertOp); 2652 rewriter.replaceOpWithNewOp<mlir::arith::ConstantOp>( 2653 insertOp, seqTyAttr, denseAttr); 2654 } 2655 } 2656 } 2657 rewriter.eraseOp(global); 2658 return mlir::success(); 2659 } 2660 2661 bool isFullRange(mlir::DenseIntElementsAttr indexes, 2662 fir::SequenceType seqTy) const { 2663 auto extents = seqTy.getShape(); 2664 if (indexes.size() / 2 != static_cast<int64_t>(extents.size())) 2665 return false; 2666 auto cur_index = indexes.value_begin<int64_t>(); 2667 for (unsigned i = 0; i < indexes.size(); i += 2) { 2668 if (*(cur_index++) != 0) 2669 return false; 2670 if (*(cur_index++) != extents[i / 2] - 1) 2671 return false; 2672 } 2673 return true; 2674 } 2675 2676 // TODO: String comparaison should be avoided. Replace linkName with an 2677 // enumeration. 
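// For reference only, a sketch of the same mapping written with
// llvm::StringSwitch (from llvm/ADT/StringSwitch.h); note that it still
// compares strings, so it does not by itself address the TODO above:
//   return llvm::StringSwitch<mlir::LLVM::Linkage>(name)
//       .Case("internal", mlir::LLVM::Linkage::Internal)
//       .Case("linkonce", mlir::LLVM::Linkage::Linkonce)
//       .Case("linkonce_odr", mlir::LLVM::Linkage::LinkonceODR)
//       .Case("common", mlir::LLVM::Linkage::Common)
//       .Case("weak", mlir::LLVM::Linkage::Weak)
//       .Default(mlir::LLVM::Linkage::External);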
2678 mlir::LLVM::Linkage 2679 convertLinkage(llvm::Optional<llvm::StringRef> optLinkage) const { 2680 if (optLinkage.hasValue()) { 2681 auto name = optLinkage.getValue(); 2682 if (name == "internal") 2683 return mlir::LLVM::Linkage::Internal; 2684 if (name == "linkonce") 2685 return mlir::LLVM::Linkage::Linkonce; 2686 if (name == "linkonce_odr") 2687 return mlir::LLVM::Linkage::LinkonceODR; 2688 if (name == "common") 2689 return mlir::LLVM::Linkage::Common; 2690 if (name == "weak") 2691 return mlir::LLVM::Linkage::Weak; 2692 } 2693 return mlir::LLVM::Linkage::External; 2694 } 2695 }; 2696 2697 /// `fir.load` --> `llvm.load` 2698 struct LoadOpConversion : public FIROpConversion<fir::LoadOp> { 2699 using FIROpConversion::FIROpConversion; 2700 2701 mlir::LogicalResult 2702 matchAndRewrite(fir::LoadOp load, OpAdaptor adaptor, 2703 mlir::ConversionPatternRewriter &rewriter) const override { 2704 // fir.box is a special case because it is considered as an ssa values in 2705 // fir, but it is lowered as a pointer to a descriptor. So fir.ref<fir.box> 2706 // and fir.box end up being the same llvm types and loading a 2707 // fir.ref<fir.box> is actually a no op in LLVM. 2708 if (load.getType().isa<fir::BoxType>()) { 2709 rewriter.replaceOp(load, adaptor.getOperands()[0]); 2710 } else { 2711 rewriter.replaceOpWithNewOp<mlir::LLVM::LoadOp>( 2712 load, convertType(load.getType()), adaptor.getOperands(), 2713 load->getAttrs()); 2714 } 2715 return mlir::success(); 2716 } 2717 }; 2718 2719 /// Lower `fir.no_reassoc` to LLVM IR dialect. 2720 /// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast 2721 /// math flags? 2722 struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> { 2723 using FIROpConversion::FIROpConversion; 2724 2725 mlir::LogicalResult 2726 matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor, 2727 mlir::ConversionPatternRewriter &rewriter) const override { 2728 rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]); 2729 return mlir::success(); 2730 } 2731 }; 2732 2733 static void genCondBrOp(mlir::Location loc, mlir::Value cmp, mlir::Block *dest, 2734 llvm::Optional<mlir::ValueRange> destOps, 2735 mlir::ConversionPatternRewriter &rewriter, 2736 mlir::Block *newBlock) { 2737 if (destOps.hasValue()) 2738 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, destOps.getValue(), 2739 newBlock, mlir::ValueRange()); 2740 else 2741 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, dest, newBlock); 2742 } 2743 2744 template <typename A, typename B> 2745 static void genBrOp(A caseOp, mlir::Block *dest, llvm::Optional<B> destOps, 2746 mlir::ConversionPatternRewriter &rewriter) { 2747 if (destOps.hasValue()) 2748 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, destOps.getValue(), 2749 dest); 2750 else 2751 rewriter.replaceOpWithNewOp<mlir::LLVM::BrOp>(caseOp, llvm::None, dest); 2752 } 2753 2754 static void genCaseLadderStep(mlir::Location loc, mlir::Value cmp, 2755 mlir::Block *dest, 2756 llvm::Optional<mlir::ValueRange> destOps, 2757 mlir::ConversionPatternRewriter &rewriter) { 2758 auto *thisBlock = rewriter.getInsertionBlock(); 2759 auto *newBlock = createBlock(rewriter, dest); 2760 rewriter.setInsertionPointToEnd(thisBlock); 2761 genCondBrOp(loc, cmp, dest, destOps, rewriter, newBlock); 2762 rewriter.setInsertionPointToEnd(newBlock); 2763 } 2764 2765 /// Conversion of `fir.select_case` 2766 /// 2767 /// The `fir.select_case` operation is converted to a if-then-else ladder. 
2768 /// Depending on the case condition type, one or several comparisons and 2769 /// conditional branches may be generated. 2770 /// 2771 /// A point value case such as `case(4)`, a lower bound case such as 2772 /// `case(5:)` or an upper bound case such as `case(:3)` are converted to a 2773 /// simple comparison between the selector value and the constant value in the 2774 /// case. The block associated with the case condition is then executed if 2775 /// the comparison succeeds; otherwise, it branches to the next block with the 2776 /// comparison for the next case condition. 2777 /// 2778 /// A closed interval case condition such as `case(7:10)` is converted with a 2779 /// first comparison and conditional branching for the lower bound. If 2780 /// successful, it branches to a second block with the comparison for the 2781 /// upper bound in the same case condition. 2782 /// 2783 /// TODO: lowering of CHARACTER type cases is not handled yet. 2784 struct SelectCaseOpConversion : public FIROpConversion<fir::SelectCaseOp> { 2785 using FIROpConversion::FIROpConversion; 2786 2787 mlir::LogicalResult 2788 matchAndRewrite(fir::SelectCaseOp caseOp, OpAdaptor adaptor, 2789 mlir::ConversionPatternRewriter &rewriter) const override { 2790 unsigned conds = caseOp.getNumConditions(); 2791 llvm::ArrayRef<mlir::Attribute> cases = caseOp.getCases().getValue(); 2792 // Type can be CHARACTER, INTEGER, or LOGICAL (C1145) 2793 auto ty = caseOp.getSelector().getType(); 2794 if (ty.isa<fir::CharacterType>()) { 2795 TODO(caseOp.getLoc(), "fir.select_case codegen with character type"); 2796 return mlir::failure(); 2797 } 2798 mlir::Value selector = caseOp.getSelector(adaptor.getOperands()); 2799 auto loc = caseOp.getLoc(); 2800 for (unsigned t = 0; t != conds; ++t) { 2801 mlir::Block *dest = caseOp.getSuccessor(t); 2802 llvm::Optional<mlir::ValueRange> destOps = 2803 caseOp.getSuccessorOperands(adaptor.getOperands(), t); 2804 llvm::Optional<mlir::ValueRange> cmpOps = 2805 *caseOp.getCompareOperands(adaptor.getOperands(), t); 2806 mlir::Value caseArg = *(cmpOps.getValue().begin()); 2807 mlir::Attribute attr = cases[t]; 2808 if (attr.isa<fir::PointIntervalAttr>()) { 2809 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2810 loc, mlir::LLVM::ICmpPredicate::eq, selector, caseArg); 2811 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2812 continue; 2813 } 2814 if (attr.isa<fir::LowerBoundAttr>()) { 2815 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2816 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2817 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2818 continue; 2819 } 2820 if (attr.isa<fir::UpperBoundAttr>()) { 2821 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2822 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg); 2823 genCaseLadderStep(loc, cmp, dest, destOps, rewriter); 2824 continue; 2825 } 2826 if (attr.isa<fir::ClosedIntervalAttr>()) { 2827 auto cmp = rewriter.create<mlir::LLVM::ICmpOp>( 2828 loc, mlir::LLVM::ICmpPredicate::sle, caseArg, selector); 2829 auto *thisBlock = rewriter.getInsertionBlock(); 2830 auto *newBlock1 = createBlock(rewriter, dest); 2831 auto *newBlock2 = createBlock(rewriter, dest); 2832 rewriter.setInsertionPointToEnd(thisBlock); 2833 rewriter.create<mlir::LLVM::CondBrOp>(loc, cmp, newBlock1, newBlock2); 2834 rewriter.setInsertionPointToEnd(newBlock1); 2835 mlir::Value caseArg0 = *(cmpOps.getValue().begin() + 1); 2836 auto cmp0 = rewriter.create<mlir::LLVM::ICmpOp>( 2837 loc, mlir::LLVM::ICmpPredicate::sle, selector, caseArg0); 2838 genCondBrOp(loc,
cmp0, dest, destOps, rewriter, newBlock2); 2839 rewriter.setInsertionPointToEnd(newBlock2); 2840 continue; 2841 } 2842 assert(attr.isa<mlir::UnitAttr>()); 2843 assert((t + 1 == conds) && "unit must be last"); 2844 genBrOp(caseOp, dest, destOps, rewriter); 2845 } 2846 return mlir::success(); 2847 } 2848 }; 2849 2850 template <typename OP> 2851 static void selectMatchAndRewrite(fir::LLVMTypeConverter &lowering, OP select, 2852 typename OP::Adaptor adaptor, 2853 mlir::ConversionPatternRewriter &rewriter) { 2854 unsigned conds = select.getNumConditions(); 2855 auto cases = select.getCases().getValue(); 2856 mlir::Value selector = adaptor.getSelector(); 2857 auto loc = select.getLoc(); 2858 assert(conds > 0 && "select must have cases"); 2859 2860 llvm::SmallVector<mlir::Block *> destinations; 2861 llvm::SmallVector<mlir::ValueRange> destinationsOperands; 2862 mlir::Block *defaultDestination; 2863 mlir::ValueRange defaultOperands; 2864 llvm::SmallVector<int32_t> caseValues; 2865 2866 for (unsigned t = 0; t != conds; ++t) { 2867 mlir::Block *dest = select.getSuccessor(t); 2868 auto destOps = select.getSuccessorOperands(adaptor.getOperands(), t); 2869 const mlir::Attribute &attr = cases[t]; 2870 if (auto intAttr = attr.template dyn_cast<mlir::IntegerAttr>()) { 2871 destinations.push_back(dest); 2872 destinationsOperands.push_back(destOps.hasValue() ? *destOps 2873 : mlir::ValueRange{}); 2874 caseValues.push_back(intAttr.getInt()); 2875 continue; 2876 } 2877 assert(attr.template dyn_cast_or_null<mlir::UnitAttr>()); 2878 assert((t + 1 == conds) && "unit must be last"); 2879 defaultDestination = dest; 2880 defaultOperands = destOps.hasValue() ? *destOps : mlir::ValueRange{}; 2881 } 2882 2883 // LLVM::SwitchOp takes an i32 type for the selector. 2884 if (select.getSelector().getType() != rewriter.getI32Type()) 2885 selector = rewriter.create<mlir::LLVM::TruncOp>(loc, rewriter.getI32Type(), 2886 selector); 2887 2888 rewriter.replaceOpWithNewOp<mlir::LLVM::SwitchOp>( 2889 select, selector, 2890 /*defaultDestination=*/defaultDestination, 2891 /*defaultOperands=*/defaultOperands, 2892 /*caseValues=*/caseValues, 2893 /*caseDestinations=*/destinations, 2894 /*caseOperands=*/destinationsOperands, 2895 /*branchWeights=*/llvm::ArrayRef<std::int32_t>()); 2896 } 2897 2898 /// conversion of fir::SelectOp to llvm.switch 2899 struct SelectOpConversion : public FIROpConversion<fir::SelectOp> { 2900 using FIROpConversion::FIROpConversion; 2901 2902 mlir::LogicalResult 2903 matchAndRewrite(fir::SelectOp op, OpAdaptor adaptor, 2904 mlir::ConversionPatternRewriter &rewriter) const override { 2905 selectMatchAndRewrite<fir::SelectOp>(lowerTy(), op, adaptor, rewriter); 2906 return mlir::success(); 2907 } 2908 }; 2909 2910 /// conversion of fir::SelectRankOp to llvm.switch 2911 struct SelectRankOpConversion : public FIROpConversion<fir::SelectRankOp> { 2912 using FIROpConversion::FIROpConversion; 2913 2914 mlir::LogicalResult 2915 matchAndRewrite(fir::SelectRankOp op, OpAdaptor adaptor, 2916 mlir::ConversionPatternRewriter &rewriter) const override { 2917 selectMatchAndRewrite<fir::SelectRankOp>(lowerTy(), op, adaptor, rewriter); 2918 return mlir::success(); 2919 } 2920 }; 2921 2922 /// Lower `fir.select_type` to LLVM IR dialect.
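/// (Added note: `fir.select_type` is expected to have been rewritten by an
/// earlier pass, so the pattern below only emits an error if one reaches
/// code generation.)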
2923 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> { 2924 using FIROpConversion::FIROpConversion; 2925 2926 mlir::LogicalResult 2927 matchAndRewrite(fir::SelectTypeOp select, OpAdaptor adaptor, 2928 mlir::ConversionPatternRewriter &rewriter) const override { 2929 mlir::emitError(select.getLoc(), 2930 "fir.select_type should have already been converted"); 2931 return mlir::failure(); 2932 } 2933 }; 2934 2935 /// `fir.store` --> `llvm.store` 2936 struct StoreOpConversion : public FIROpConversion<fir::StoreOp> { 2937 using FIROpConversion::FIROpConversion; 2938 2939 mlir::LogicalResult 2940 matchAndRewrite(fir::StoreOp store, OpAdaptor adaptor, 2941 mlir::ConversionPatternRewriter &rewriter) const override { 2942 if (store.getValue().getType().isa<fir::BoxType>()) { 2943 // fir.box value is actually in memory, load it first before storing it. 2944 mlir::Location loc = store.getLoc(); 2945 mlir::Type boxPtrTy = adaptor.getOperands()[0].getType(); 2946 auto val = rewriter.create<mlir::LLVM::LoadOp>( 2947 loc, boxPtrTy.cast<mlir::LLVM::LLVMPointerType>().getElementType(), 2948 adaptor.getOperands()[0]); 2949 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2950 store, val, adaptor.getOperands()[1]); 2951 } else { 2952 rewriter.replaceOpWithNewOp<mlir::LLVM::StoreOp>( 2953 store, adaptor.getOperands()[0], adaptor.getOperands()[1]); 2954 } 2955 return mlir::success(); 2956 } 2957 }; 2958 2959 namespace { 2960 2961 /// Convert `fir.unboxchar` into two `llvm.extractvalue` instructions. One for 2962 /// the character buffer and one for the buffer length. 2963 struct UnboxCharOpConversion : public FIROpConversion<fir::UnboxCharOp> { 2964 using FIROpConversion::FIROpConversion; 2965 2966 mlir::LogicalResult 2967 matchAndRewrite(fir::UnboxCharOp unboxchar, OpAdaptor adaptor, 2968 mlir::ConversionPatternRewriter &rewriter) const override { 2969 auto *ctx = unboxchar.getContext(); 2970 2971 mlir::Type lenTy = convertType(unboxchar.getType(1)); 2972 mlir::Value tuple = adaptor.getOperands()[0]; 2973 mlir::Type tupleTy = tuple.getType(); 2974 2975 mlir::Location loc = unboxchar.getLoc(); 2976 mlir::Value ptrToBuffer = 2977 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 0); 2978 2979 mlir::LLVM::ExtractValueOp len = 2980 genExtractValueWithIndex(loc, tuple, tupleTy, rewriter, ctx, 1); 2981 mlir::Value lenAfterCast = integerCast(loc, rewriter, lenTy, len); 2982 2983 rewriter.replaceOp(unboxchar, 2984 llvm::ArrayRef<mlir::Value>{ptrToBuffer, lenAfterCast}); 2985 return mlir::success(); 2986 } 2987 }; 2988 2989 /// Lower `fir.unboxproc` operation. Unbox a procedure box value, yielding its 2990 /// components. 2991 /// TODO: Part of supporting Fortran 2003 procedure pointers. 
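/// (Added note: a `fir.boxproc` value packages a procedure address together
/// with any host-association context, so unboxing it should eventually yield
/// both components; until that is implemented, the pattern below emits a
/// TODO diagnostic.)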
2992 struct UnboxProcOpConversion : public FIROpConversion<fir::UnboxProcOp> { 2993 using FIROpConversion::FIROpConversion; 2994 2995 mlir::LogicalResult 2996 matchAndRewrite(fir::UnboxProcOp unboxproc, OpAdaptor adaptor, 2997 mlir::ConversionPatternRewriter &rewriter) const override { 2998 TODO(unboxproc.getLoc(), "fir.unboxproc codegen"); 2999 return mlir::failure(); 3000 } 3001 }; 3002 3003 /// convert to LLVM IR dialect `undef` 3004 struct UndefOpConversion : public FIROpConversion<fir::UndefOp> { 3005 using FIROpConversion::FIROpConversion; 3006 3007 mlir::LogicalResult 3008 matchAndRewrite(fir::UndefOp undef, OpAdaptor, 3009 mlir::ConversionPatternRewriter &rewriter) const override { 3010 rewriter.replaceOpWithNewOp<mlir::LLVM::UndefOp>( 3011 undef, convertType(undef.getType())); 3012 return mlir::success(); 3013 } 3014 }; 3015 3016 struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> { 3017 using FIROpConversion::FIROpConversion; 3018 3019 mlir::LogicalResult 3020 matchAndRewrite(fir::ZeroOp zero, OpAdaptor, 3021 mlir::ConversionPatternRewriter &rewriter) const override { 3022 mlir::Type ty = convertType(zero.getType()); 3023 if (ty.isa<mlir::LLVM::LLVMPointerType>()) { 3024 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty); 3025 } else if (ty.isa<mlir::IntegerType>()) { 3026 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3027 zero, ty, mlir::IntegerAttr::get(zero.getType(), 0)); 3028 } else if (mlir::LLVM::isCompatibleFloatingPointType(ty)) { 3029 rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>( 3030 zero, ty, mlir::FloatAttr::get(zero.getType(), 0.0)); 3031 } else { 3032 // TODO: create ConstantAggregateZero for FIR aggregate/array types. 3033 return rewriter.notifyMatchFailure( 3034 zero, 3035 "conversion of fir.zero with aggregate type not implemented yet"); 3036 } 3037 return mlir::success(); 3038 } 3039 }; 3040 3041 /// `fir.unreachable` --> `llvm.unreachable` 3042 struct UnreachableOpConversion : public FIROpConversion<fir::UnreachableOp> { 3043 using FIROpConversion::FIROpConversion; 3044 3045 mlir::LogicalResult 3046 matchAndRewrite(fir::UnreachableOp unreach, OpAdaptor adaptor, 3047 mlir::ConversionPatternRewriter &rewriter) const override { 3048 rewriter.replaceOpWithNewOp<mlir::LLVM::UnreachableOp>(unreach); 3049 return mlir::success(); 3050 } 3051 }; 3052 3053 /// `fir.is_present` --> 3054 /// ``` 3055 /// %0 = llvm.mlir.constant(0 : i64) 3056 /// %1 = llvm.ptrtoint %0 3057 /// %2 = llvm.icmp "ne" %1, %0 : i64 3058 /// ``` 3059 struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> { 3060 using FIROpConversion::FIROpConversion; 3061 3062 mlir::LogicalResult 3063 matchAndRewrite(fir::IsPresentOp isPresent, OpAdaptor adaptor, 3064 mlir::ConversionPatternRewriter &rewriter) const override { 3065 mlir::Type idxTy = lowerTy().indexType(); 3066 mlir::Location loc = isPresent.getLoc(); 3067 auto ptr = adaptor.getOperands()[0]; 3068 3069 if (isPresent.getVal().getType().isa<fir::BoxCharType>()) { 3070 auto structTy = ptr.getType().cast<mlir::LLVM::LLVMStructType>(); 3071 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3072 3073 mlir::Type ty = structTy.getBody()[0]; 3074 mlir::MLIRContext *ctx = isPresent.getContext(); 3075 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3076 ptr = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, ty, ptr, c0); 3077 } 3078 mlir::LLVM::ConstantOp c0 = 3079 genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0); 3080 auto addr = 
rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, ptr); 3081 rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>( 3082 isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0); 3083 3084 return mlir::success(); 3085 } 3086 }; 3087 3088 /// Create value signaling an absent optional argument in a call, e.g. 3089 /// `fir.absent !fir.ref<i64>` --> `llvm.mlir.null : !llvm.ptr<i64>` 3090 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> { 3091 using FIROpConversion::FIROpConversion; 3092 3093 mlir::LogicalResult 3094 matchAndRewrite(fir::AbsentOp absent, OpAdaptor, 3095 mlir::ConversionPatternRewriter &rewriter) const override { 3096 mlir::Type ty = convertType(absent.getType()); 3097 mlir::Location loc = absent.getLoc(); 3098 3099 if (absent.getType().isa<fir::BoxCharType>()) { 3100 auto structTy = ty.cast<mlir::LLVM::LLVMStructType>(); 3101 assert(!structTy.isOpaque() && !structTy.getBody().empty()); 3102 auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3103 auto nullField = 3104 rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]); 3105 mlir::MLIRContext *ctx = absent.getContext(); 3106 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3107 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>( 3108 absent, ty, undefStruct, nullField, c0); 3109 } else { 3110 rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty); 3111 } 3112 return mlir::success(); 3113 } 3114 }; 3115 3116 // 3117 // Primitive operations on Complex types 3118 // 3119 3120 /// Generate inline code for complex addition/subtraction 3121 template <typename LLVMOP, typename OPTY> 3122 static mlir::LLVM::InsertValueOp 3123 complexSum(OPTY sumop, mlir::ValueRange opnds, 3124 mlir::ConversionPatternRewriter &rewriter, 3125 fir::LLVMTypeConverter &lowering) { 3126 mlir::Value a = opnds[0]; 3127 mlir::Value b = opnds[1]; 3128 auto loc = sumop.getLoc(); 3129 auto ctx = sumop.getContext(); 3130 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3131 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3132 mlir::Type eleTy = lowering.convertType(getComplexEleTy(sumop.getType())); 3133 mlir::Type ty = lowering.convertType(sumop.getType()); 3134 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3135 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3136 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3137 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3138 auto rx = rewriter.create<LLVMOP>(loc, eleTy, x0, x1); 3139 auto ry = rewriter.create<LLVMOP>(loc, eleTy, y0, y1); 3140 auto r0 = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3141 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r0, rx, c0); 3142 return rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ry, c1); 3143 } 3144 } // namespace 3145 3146 namespace { 3147 struct AddcOpConversion : public FIROpConversion<fir::AddcOp> { 3148 using FIROpConversion::FIROpConversion; 3149 3150 mlir::LogicalResult 3151 matchAndRewrite(fir::AddcOp addc, OpAdaptor adaptor, 3152 mlir::ConversionPatternRewriter &rewriter) const override { 3153 // given: (x + iy) + (x' + iy') 3154 // result: (x + x') + i(y + y') 3155 auto r = complexSum<mlir::LLVM::FAddOp>(addc, adaptor.getOperands(), 3156 rewriter, lowerTy()); 3157 rewriter.replaceOp(addc, r.getResult()); 3158 return mlir::success(); 3159 } 3160 }; 3161 3162 struct SubcOpConversion : public FIROpConversion<fir::SubcOp> { 3163 using 
FIROpConversion::FIROpConversion; 3164 3165 mlir::LogicalResult 3166 matchAndRewrite(fir::SubcOp subc, OpAdaptor adaptor, 3167 mlir::ConversionPatternRewriter &rewriter) const override { 3168 // given: (x + iy) - (x' + iy') 3169 // result: (x - x') + i(y - y') 3170 auto r = complexSum<mlir::LLVM::FSubOp>(subc, adaptor.getOperands(), 3171 rewriter, lowerTy()); 3172 rewriter.replaceOp(subc, r.getResult()); 3173 return mlir::success(); 3174 } 3175 }; 3176 3177 /// Inlined complex multiply 3178 struct MulcOpConversion : public FIROpConversion<fir::MulcOp> { 3179 using FIROpConversion::FIROpConversion; 3180 3181 mlir::LogicalResult 3182 matchAndRewrite(fir::MulcOp mulc, OpAdaptor adaptor, 3183 mlir::ConversionPatternRewriter &rewriter) const override { 3184 // TODO: Can we use a call to __muldc3 ? 3185 // given: (x + iy) * (x' + iy') 3186 // result: (xx'-yy')+i(xy'+yx') 3187 mlir::Value a = adaptor.getOperands()[0]; 3188 mlir::Value b = adaptor.getOperands()[1]; 3189 auto loc = mulc.getLoc(); 3190 auto *ctx = mulc.getContext(); 3191 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3192 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3193 mlir::Type eleTy = convertType(getComplexEleTy(mulc.getType())); 3194 mlir::Type ty = convertType(mulc.getType()); 3195 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3196 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3197 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3198 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3199 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3200 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3201 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3202 auto ri = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xy, yx); 3203 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3204 auto rr = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, xx, yy); 3205 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3206 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3207 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3208 rewriter.replaceOp(mulc, r0.getResult()); 3209 return mlir::success(); 3210 } 3211 }; 3212 3213 /// Inlined complex division 3214 struct DivcOpConversion : public FIROpConversion<fir::DivcOp> { 3215 using FIROpConversion::FIROpConversion; 3216 3217 mlir::LogicalResult 3218 matchAndRewrite(fir::DivcOp divc, OpAdaptor adaptor, 3219 mlir::ConversionPatternRewriter &rewriter) const override { 3220 // TODO: Can we use a call to __divdc3 instead? 3221 // Just generate inline code for now. 
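    // Added derivation (commentary only): multiplying numerator and
    // denominator by the conjugate (x' - iy') gives
    //   (x + iy) / (x' + iy') = ((xx' + yy') + i(yx' - xy')) / (x'x' + y'y')
    // which is exactly what the ops below compute. This textbook form does no
    // scaling, so it can overflow or underflow for extreme |x'|, |y'|; a
    // library routine such as __divdc3 (see the TODO above) would be more
    // robust in those cases.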
3222 // given: (x + iy) / (x' + iy') 3223 // result: ((xx'+yy')/d) + i((yx'-xy')/d) where d = x'x' + y'y' 3224 mlir::Value a = adaptor.getOperands()[0]; 3225 mlir::Value b = adaptor.getOperands()[1]; 3226 auto loc = divc.getLoc(); 3227 auto *ctx = divc.getContext(); 3228 auto c0 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(0)); 3229 auto c1 = mlir::ArrayAttr::get(ctx, rewriter.getI32IntegerAttr(1)); 3230 mlir::Type eleTy = convertType(getComplexEleTy(divc.getType())); 3231 mlir::Type ty = convertType(divc.getType()); 3232 auto x0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c0); 3233 auto y0 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, a, c1); 3234 auto x1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c0); 3235 auto y1 = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, b, c1); 3236 auto xx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, x1); 3237 auto x1x1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x1, x1); 3238 auto yx = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, x1); 3239 auto xy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, x0, y1); 3240 auto yy = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y0, y1); 3241 auto y1y1 = rewriter.create<mlir::LLVM::FMulOp>(loc, eleTy, y1, y1); 3242 auto d = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, x1x1, y1y1); 3243 auto rrn = rewriter.create<mlir::LLVM::FAddOp>(loc, eleTy, xx, yy); 3244 auto rin = rewriter.create<mlir::LLVM::FSubOp>(loc, eleTy, yx, xy); 3245 auto rr = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rrn, d); 3246 auto ri = rewriter.create<mlir::LLVM::FDivOp>(loc, eleTy, rin, d); 3247 auto ra = rewriter.create<mlir::LLVM::UndefOp>(loc, ty); 3248 auto r1 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, ra, rr, c0); 3249 auto r0 = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, r1, ri, c1); 3250 rewriter.replaceOp(divc, r0.getResult()); 3251 return mlir::success(); 3252 } 3253 }; 3254 3255 /// Inlined complex negation 3256 struct NegcOpConversion : public FIROpConversion<fir::NegcOp> { 3257 using FIROpConversion::FIROpConversion; 3258 3259 mlir::LogicalResult 3260 matchAndRewrite(fir::NegcOp neg, OpAdaptor adaptor, 3261 mlir::ConversionPatternRewriter &rewriter) const override { 3262 // given: -(x + iy) 3263 // result: -x - iy 3264 auto *ctxt = neg.getContext(); 3265 auto eleTy = convertType(getComplexEleTy(neg.getType())); 3266 auto ty = convertType(neg.getType()); 3267 auto loc = neg.getLoc(); 3268 mlir::Value o0 = adaptor.getOperands()[0]; 3269 auto c0 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(0)); 3270 auto c1 = mlir::ArrayAttr::get(ctxt, rewriter.getI32IntegerAttr(1)); 3271 auto rp = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c0); 3272 auto ip = rewriter.create<mlir::LLVM::ExtractValueOp>(loc, eleTy, o0, c1); 3273 auto nrp = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, rp); 3274 auto nip = rewriter.create<mlir::LLVM::FNegOp>(loc, eleTy, ip); 3275 auto r = rewriter.create<mlir::LLVM::InsertValueOp>(loc, ty, o0, nrp, c0); 3276 rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(neg, ty, r, nip, c1); 3277 return mlir::success(); 3278 } 3279 }; 3280 3281 /// Conversion pattern for operations that must be dead. The information in these 3282 /// operations is used by other operations; at this point they should not have 3283 /// any remaining uses. 3284 /// These operations are normally dead after the pre-codegen pass.
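/// (Added note: the patterns instantiated below cover `fir.shape`,
/// `fir.shape_shift`, `fir.shift`, and `fir.slice`. The pre-codegen rewrite
/// is expected to fold their information into the cg operations such as
/// XEmbox and XArrayCoor, which is what leaves them without uses here.)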
3285 template <typename FromOp> 3286 struct MustBeDeadConversion : public FIROpConversion<FromOp> { 3287 explicit MustBeDeadConversion(fir::LLVMTypeConverter &lowering, 3288 const fir::FIRToLLVMPassOptions &options) 3289 : FIROpConversion<FromOp>(lowering, options) {} 3290 using OpAdaptor = typename FromOp::Adaptor; 3291 3292 mlir::LogicalResult 3293 matchAndRewrite(FromOp op, OpAdaptor adaptor, 3294 mlir::ConversionPatternRewriter &rewriter) const final { 3295 if (!op->getUses().empty()) 3296 return rewriter.notifyMatchFailure(op, "op must be dead"); 3297 rewriter.eraseOp(op); 3298 return mlir::success(); 3299 } 3300 }; 3301 3302 struct ShapeOpConversion : public MustBeDeadConversion<fir::ShapeOp> { 3303 using MustBeDeadConversion::MustBeDeadConversion; 3304 }; 3305 3306 struct ShapeShiftOpConversion : public MustBeDeadConversion<fir::ShapeShiftOp> { 3307 using MustBeDeadConversion::MustBeDeadConversion; 3308 }; 3309 3310 struct ShiftOpConversion : public MustBeDeadConversion<fir::ShiftOp> { 3311 using MustBeDeadConversion::MustBeDeadConversion; 3312 }; 3313 3314 struct SliceOpConversion : public MustBeDeadConversion<fir::SliceOp> { 3315 using MustBeDeadConversion::MustBeDeadConversion; 3316 }; 3317 3318 } // namespace 3319 3320 namespace { 3321 /// Convert FIR dialect to LLVM dialect 3322 /// 3323 /// This pass lowers all FIR dialect operations to LLVM IR dialect. An 3324 /// MLIR pass is used to lower residual Std dialect to LLVM IR dialect. 3325 class FIRToLLVMLowering : public fir::FIRToLLVMLoweringBase<FIRToLLVMLowering> { 3326 public: 3327 FIRToLLVMLowering() = default; 3328 FIRToLLVMLowering(fir::FIRToLLVMPassOptions options) : options{options} {} 3329 mlir::ModuleOp getModule() { return getOperation(); } 3330 3331 void runOnOperation() override final { 3332 auto mod = getModule(); 3333 if (!forcedTargetTriple.empty()) 3334 fir::setTargetTriple(mod, forcedTargetTriple); 3335 3336 auto *context = getModule().getContext(); 3337 fir::LLVMTypeConverter typeConverter{getModule()}; 3338 mlir::RewritePatternSet pattern(context); 3339 pattern.insert< 3340 AbsentOpConversion, AddcOpConversion, AddrOfOpConversion, 3341 AllocaOpConversion, AllocMemOpConversion, BoxAddrOpConversion, 3342 BoxCharLenOpConversion, BoxDimsOpConversion, BoxEleSizeOpConversion, 3343 BoxIsAllocOpConversion, BoxIsArrayOpConversion, BoxIsPtrOpConversion, 3344 BoxProcHostOpConversion, BoxRankOpConversion, BoxTypeDescOpConversion, 3345 CallOpConversion, CmpcOpConversion, ConstcOpConversion, 3346 ConvertOpConversion, CoordinateOpConversion, DispatchOpConversion, 3347 DispatchTableOpConversion, DTEntryOpConversion, DivcOpConversion, 3348 EmboxOpConversion, EmboxCharOpConversion, EmboxProcOpConversion, 3349 ExtractValueOpConversion, FieldIndexOpConversion, FirEndOpConversion, 3350 FreeMemOpConversion, GenTypeDescOpConversion, GlobalLenOpConversion, 3351 GlobalOpConversion, HasValueOpConversion, InsertOnRangeOpConversion, 3352 InsertValueOpConversion, IsPresentOpConversion, 3353 LenParamIndexOpConversion, LoadOpConversion, MulcOpConversion, 3354 NegcOpConversion, NoReassocOpConversion, SelectCaseOpConversion, 3355 SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion, 3356 ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion, 3357 SliceOpConversion, StoreOpConversion, StringLitOpConversion, 3358 SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion, 3359 UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion, 3360 XEmboxOpConversion, XReboxOpConversion, 
ZeroOpConversion>(typeConverter, 3361 options); 3362 mlir::populateFuncToLLVMConversionPatterns(typeConverter, pattern); 3363 mlir::populateOpenMPToLLVMConversionPatterns(typeConverter, pattern); 3364 mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, 3365 pattern); 3366 mlir::cf::populateControlFlowToLLVMConversionPatterns(typeConverter, 3367 pattern); 3368 mlir::ConversionTarget target{*context}; 3369 target.addLegalDialect<mlir::LLVM::LLVMDialect>(); 3370 // The OpenMP dialect is legal for operations without regions; for those 3371 // which contain regions, it is legal if the region contains only the 3372 // LLVM dialect. Add OpenMP dialect as a legal dialect for conversion and 3373 // legalize conversion of OpenMP operations without regions. 3374 mlir::configureOpenMPToLLVMConversionLegality(target, typeConverter); 3375 target.addLegalDialect<mlir::omp::OpenMPDialect>(); 3376 3377 // required NOPs for applying a full conversion 3378 target.addLegalOp<mlir::ModuleOp>(); 3379 3380 // apply the patterns 3381 if (mlir::failed(mlir::applyFullConversion(getModule(), target, 3382 std::move(pattern)))) { 3383 signalPassFailure(); 3384 } 3385 } 3386 3387 private: 3388 fir::FIRToLLVMPassOptions options; 3389 }; 3390 3391 /// Lower from LLVM IR dialect to proper LLVM-IR and dump the module 3392 struct LLVMIRLoweringPass 3393 : public mlir::PassWrapper<LLVMIRLoweringPass, 3394 mlir::OperationPass<mlir::ModuleOp>> { 3395 MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LLVMIRLoweringPass) 3396 3397 LLVMIRLoweringPass(llvm::raw_ostream &output, fir::LLVMIRLoweringPrinter p) 3398 : output{output}, printer{p} {} 3399 3400 mlir::ModuleOp getModule() { return getOperation(); } 3401 3402 void runOnOperation() override final { 3403 auto *ctx = getModule().getContext(); 3404 auto optName = getModule().getName(); 3405 llvm::LLVMContext llvmCtx; 3406 if (auto llvmModule = mlir::translateModuleToLLVMIR( 3407 getModule(), llvmCtx, optName ? *optName : "FIRModule")) { 3408 printer(*llvmModule, output); 3409 return; 3410 } 3411 3412 mlir::emitError(mlir::UnknownLoc::get(ctx), "could not emit LLVM-IR\n"); 3413 signalPassFailure(); 3414 } 3415 3416 private: 3417 llvm::raw_ostream &output; 3418 fir::LLVMIRLoweringPrinter printer; 3419 }; 3420 3421 } // namespace 3422 3423 std::unique_ptr<mlir::Pass> fir::createFIRToLLVMPass() { 3424 return std::make_unique<FIRToLLVMLowering>(); 3425 } 3426 3427 std::unique_ptr<mlir::Pass> 3428 fir::createFIRToLLVMPass(fir::FIRToLLVMPassOptions options) { 3429 return std::make_unique<FIRToLLVMLowering>(options); 3430 } 3431 3432 std::unique_ptr<mlir::Pass> 3433 fir::createLLVMDialectToLLVMPass(llvm::raw_ostream &output, 3434 fir::LLVMIRLoweringPrinter printer) { 3435 return std::make_unique<LLVMIRLoweringPass>(output, printer); 3436 } 3437
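// Usage sketch (added commentary, illustrative only): the factory functions
// above are typically scheduled on a pass manager, e.g.
//   mlir::PassManager pm(&context);
//   pm.addPass(fir::createFIRToLLVMPass());
//   if (mlir::failed(pm.run(module)))
//     ; // handle the failure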